/*-
 * Copyright (c) 2010 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/10.2/sys/geom/raid/tr_raid1.c 260385 2014-01-07 01:32:23Z scottl $");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/kobj.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <geom/geom.h>
42 #include "geom/raid/g_raid.h"
43 #include "g_raid_tr_if.h"
44
45 SYSCTL_DECL(_kern_geom_raid_raid1);
46
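/*
 * Tunables pacing the rebuild process. A rebuild copies the volume in
 * slab-sized transactions, yielding most of the I/O bandwidth to regular
 * traffic while the volume is busy, and periodically writing updated
 * metadata so an interrupted rebuild can resume where it left off.
 */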
#define RAID1_REBUILD_SLAB	(1 << 20) /* One transaction in a rebuild */
static int g_raid1_rebuild_slab = RAID1_REBUILD_SLAB;
TUNABLE_INT("kern.geom.raid.raid1.rebuild_slab_size",
    &g_raid1_rebuild_slab);
SYSCTL_UINT(_kern_geom_raid_raid1, OID_AUTO, rebuild_slab_size, CTLFLAG_RW,
    &g_raid1_rebuild_slab, 0,
    "Amount of the disk to rebuild each read/write cycle of the rebuild.");

#define RAID1_REBUILD_FAIR_IO	20 /* use 1/x of the available I/O */
static int g_raid1_rebuild_fair_io = RAID1_REBUILD_FAIR_IO;
TUNABLE_INT("kern.geom.raid.raid1.rebuild_fair_io",
    &g_raid1_rebuild_fair_io);
SYSCTL_UINT(_kern_geom_raid_raid1, OID_AUTO, rebuild_fair_io, CTLFLAG_RW,
    &g_raid1_rebuild_fair_io, 0,
    "Fraction of the I/O bandwidth to use for rebuild when the disk is busy.");

#define RAID1_REBUILD_CLUSTER_IDLE	100
static int g_raid1_rebuild_cluster_idle = RAID1_REBUILD_CLUSTER_IDLE;
TUNABLE_INT("kern.geom.raid.raid1.rebuild_cluster_idle",
    &g_raid1_rebuild_cluster_idle);
SYSCTL_UINT(_kern_geom_raid_raid1, OID_AUTO, rebuild_cluster_idle, CTLFLAG_RW,
    &g_raid1_rebuild_cluster_idle, 0,
    "Number of slabs to do each time we trigger a rebuild cycle.");

#define RAID1_REBUILD_META_UPDATE	1024 /* update metadata every 1GB or so */
static int g_raid1_rebuild_meta_update = RAID1_REBUILD_META_UPDATE;
TUNABLE_INT("kern.geom.raid.raid1.rebuild_meta_update",
    &g_raid1_rebuild_meta_update);
SYSCTL_UINT(_kern_geom_raid_raid1, OID_AUTO, rebuild_meta_update, CTLFLAG_RW,
    &g_raid1_rebuild_meta_update, 0,
    "Number of slabs to rebuild between metadata updates.");
static MALLOC_DEFINE(M_TR_RAID1, "tr_raid1_data", "GEOM_RAID RAID1 data");

#define TR_RAID1_NONE		0
#define TR_RAID1_REBUILD	1
#define TR_RAID1_RESYNC		2

#define TR_RAID1_F_DOING_SOME	0x1
#define TR_RAID1_F_LOCKED	0x2
#define TR_RAID1_F_ABORT	0x4

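/*
 * Per-volume state of the RAID1 transformation module. Rebuild/resync
 * progress lives here: the subdisk being reconstructed, the slab buffer,
 * and the countdown fields that pace I/O and metadata updates.
 */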
struct g_raid_tr_raid1_object {
	struct g_raid_tr_object	 trso_base;
	int			 trso_starting;
	int			 trso_stopping;
	int			 trso_type;
	int			 trso_recover_slabs; /* Slabs left before resting */
	int			 trso_fair_io;
	int			 trso_meta_update;
	int			 trso_flags;
	struct g_raid_subdisk	*trso_failed_sd; /* like per volume */
	void			*trso_buffer;	 /* Buffer space */
	struct bio		 trso_bio;
};

static g_raid_tr_taste_t g_raid_tr_taste_raid1;
static g_raid_tr_event_t g_raid_tr_event_raid1;
static g_raid_tr_start_t g_raid_tr_start_raid1;
static g_raid_tr_stop_t g_raid_tr_stop_raid1;
static g_raid_tr_iostart_t g_raid_tr_iostart_raid1;
static g_raid_tr_iodone_t g_raid_tr_iodone_raid1;
static g_raid_tr_kerneldump_t g_raid_tr_kerneldump_raid1;
static g_raid_tr_locked_t g_raid_tr_locked_raid1;
static g_raid_tr_idle_t g_raid_tr_idle_raid1;
static g_raid_tr_free_t g_raid_tr_free_raid1;

static kobj_method_t g_raid_tr_raid1_methods[] = {
	KOBJMETHOD(g_raid_tr_taste,	g_raid_tr_taste_raid1),
	KOBJMETHOD(g_raid_tr_event,	g_raid_tr_event_raid1),
	KOBJMETHOD(g_raid_tr_start,	g_raid_tr_start_raid1),
	KOBJMETHOD(g_raid_tr_stop,	g_raid_tr_stop_raid1),
	KOBJMETHOD(g_raid_tr_iostart,	g_raid_tr_iostart_raid1),
	KOBJMETHOD(g_raid_tr_iodone,	g_raid_tr_iodone_raid1),
	KOBJMETHOD(g_raid_tr_kerneldump, g_raid_tr_kerneldump_raid1),
	KOBJMETHOD(g_raid_tr_locked,	g_raid_tr_locked_raid1),
	KOBJMETHOD(g_raid_tr_idle,	g_raid_tr_idle_raid1),
	KOBJMETHOD(g_raid_tr_free,	g_raid_tr_free_raid1),
	{ 0, 0 }
};

static struct g_raid_tr_class g_raid_tr_raid1_class = {
	"RAID1",
	g_raid_tr_raid1_methods,
	sizeof(struct g_raid_tr_raid1_object),
	.trc_enable = 1,
	.trc_priority = 100,
	.trc_accept_unmapped = 1
};

static void g_raid_tr_raid1_rebuild_abort(struct g_raid_tr_object *tr);
static void g_raid_tr_raid1_maybe_rebuild(struct g_raid_tr_object *tr,
    struct g_raid_subdisk *sd);

static int
g_raid_tr_taste_raid1(struct g_raid_tr_object *tr, struct g_raid_volume *vol)
{
	struct g_raid_tr_raid1_object *trs;

	trs = (struct g_raid_tr_raid1_object *)tr;
	if (tr->tro_volume->v_raid_level != G_RAID_VOLUME_RL_RAID1 ||
	    (tr->tro_volume->v_raid_level_qualifier != G_RAID_VOLUME_RLQ_R1SM &&
	     tr->tro_volume->v_raid_level_qualifier != G_RAID_VOLUME_RLQ_R1MM))
		return (G_RAID_TR_TASTE_FAIL);
	trs->trso_starting = 1;
	return (G_RAID_TR_TASTE_SUCCEED);
}

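/*
 * Derive the volume state from the subdisk states. If no subdisk is
 * ACTIVE, promote the best remaining candidate (highest state, ties
 * broken by the furthest rebuild/resync position) so the volume stays
 * usable, then map the ACTIVE/STALE/RESYNC counts to OPTIMAL,
 * SUBOPTIMAL, DEGRADED or BROKEN and kick off a rebuild if warranted.
 */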
static int
g_raid_tr_update_state_raid1(struct g_raid_volume *vol,
    struct g_raid_subdisk *sd)
{
	struct g_raid_tr_raid1_object *trs;
	struct g_raid_softc *sc;
	struct g_raid_subdisk *tsd, *bestsd;
	u_int s;
	int i, na, ns;

	sc = vol->v_softc;
	trs = (struct g_raid_tr_raid1_object *)vol->v_tr;
	if (trs->trso_stopping &&
	    (trs->trso_flags & TR_RAID1_F_DOING_SOME) == 0)
		s = G_RAID_VOLUME_S_STOPPED;
	else if (trs->trso_starting)
		s = G_RAID_VOLUME_S_STARTING;
	else {
		/* Make sure we have at least one ACTIVE disk. */
		na = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_ACTIVE);
		if (na == 0) {
			/*
			 * Critical situation! We have no active disks!
			 * Choose the best disk we have to make it active.
			 */
			bestsd = &vol->v_subdisks[0];
			for (i = 1; i < vol->v_disks_count; i++) {
				tsd = &vol->v_subdisks[i];
				if (tsd->sd_state > bestsd->sd_state)
					bestsd = tsd;
				else if (tsd->sd_state == bestsd->sd_state &&
				    (tsd->sd_state == G_RAID_SUBDISK_S_REBUILD ||
				     tsd->sd_state == G_RAID_SUBDISK_S_RESYNC) &&
				    tsd->sd_rebuild_pos > bestsd->sd_rebuild_pos)
					bestsd = tsd;
			}
			if (bestsd->sd_state >= G_RAID_SUBDISK_S_UNINITIALIZED) {
				/* We found a reasonable candidate. */
				G_RAID_DEBUG1(1, sc,
				    "Promote subdisk %s:%d from %s to ACTIVE.",
				    vol->v_name, bestsd->sd_pos,
				    g_raid_subdisk_state2str(bestsd->sd_state));
				g_raid_change_subdisk_state(bestsd,
				    G_RAID_SUBDISK_S_ACTIVE);
				g_raid_write_metadata(sc,
				    vol, bestsd, bestsd->sd_disk);
			}
		}
		na = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_ACTIVE);
		ns = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_STALE) +
		    g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_RESYNC);
		if (na == vol->v_disks_count)
			s = G_RAID_VOLUME_S_OPTIMAL;
		else if (na + ns == vol->v_disks_count)
			s = G_RAID_VOLUME_S_SUBOPTIMAL;
		else if (na > 0)
			s = G_RAID_VOLUME_S_DEGRADED;
		else
			s = G_RAID_VOLUME_S_BROKEN;
		g_raid_tr_raid1_maybe_rebuild(vol->v_tr, sd);
	}
	if (s != vol->v_state) {
		g_raid_event_send(vol, G_RAID_VOLUME_S_ALIVE(s) ?
		    G_RAID_VOLUME_E_UP : G_RAID_VOLUME_E_DOWN,
		    G_RAID_EVENT_VOLUME);
		g_raid_change_volume_state(vol, s);
		if (!trs->trso_starting && !trs->trso_stopping)
			g_raid_write_metadata(sc, vol, NULL, NULL);
	}
	return (0);
}

static void
g_raid_tr_raid1_fail_disk(struct g_raid_softc *sc, struct g_raid_subdisk *sd,
    struct g_raid_disk *disk)
{
	/*
	 * We don't fail the last disk in the pack, since it still has decent
	 * data on it, and failing it would be worse than serving that data,
	 * e.g. when the volume holds the root file system.
	 *
	 * XXX should this be controlled via a tunable? It makes sense for
	 * the volume that has / on it. I can't think of a case where we'd
	 * want the volume to go away on this kind of event.
	 */
	if (g_raid_nsubdisks(sd->sd_volume, G_RAID_SUBDISK_S_ACTIVE) == 1 &&
	    g_raid_get_subdisk(sd->sd_volume, G_RAID_SUBDISK_S_ACTIVE) == sd)
		return;
	g_raid_fail_disk(sc, sd, disk);
}

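/*
 * Perform one rebuild transaction: pick an ACTIVE source subdisk, set up
 * a read of the next slab at the rebuild position, and lock that range
 * in the volume. The lock callback issues the read; the iodone path
 * turns it into a write to the failed subdisk.
 */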
static void
g_raid_tr_raid1_rebuild_some(struct g_raid_tr_object *tr)
{
	struct g_raid_tr_raid1_object *trs;
	struct g_raid_subdisk *sd, *good_sd;
	struct bio *bp;

	trs = (struct g_raid_tr_raid1_object *)tr;
	if (trs->trso_flags & TR_RAID1_F_DOING_SOME)
		return;
	sd = trs->trso_failed_sd;
	good_sd = g_raid_get_subdisk(sd->sd_volume, G_RAID_SUBDISK_S_ACTIVE);
	if (good_sd == NULL) {
		g_raid_tr_raid1_rebuild_abort(tr);
		return;
	}
	bp = &trs->trso_bio;
	memset(bp, 0, sizeof(*bp));
	bp->bio_offset = sd->sd_rebuild_pos;
	bp->bio_length = MIN(g_raid1_rebuild_slab,
	    sd->sd_size - sd->sd_rebuild_pos);
	bp->bio_data = trs->trso_buffer;
	bp->bio_cmd = BIO_READ;
	bp->bio_cflags = G_RAID_BIO_FLAG_SYNC;
	bp->bio_caller1 = good_sd;
	trs->trso_flags |= TR_RAID1_F_DOING_SOME;
	trs->trso_flags |= TR_RAID1_F_LOCKED;
	g_raid_lock_range(sd->sd_volume,	/* Lock callback starts I/O */
	    bp->bio_offset, bp->bio_length, NULL, bp);
}

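/*
 * Common teardown once a rebuild ends for any reason: persist metadata,
 * release the slab buffer, clear the rebuild state, and recompute the
 * volume state.
 */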
static void
g_raid_tr_raid1_rebuild_done(struct g_raid_tr_raid1_object *trs)
{
	struct g_raid_volume *vol;
	struct g_raid_subdisk *sd;

	vol = trs->trso_base.tro_volume;
	sd = trs->trso_failed_sd;
	g_raid_write_metadata(vol->v_softc, vol, sd, sd->sd_disk);
	free(trs->trso_buffer, M_TR_RAID1);
	trs->trso_buffer = NULL;
	trs->trso_flags &= ~TR_RAID1_F_DOING_SOME;
	trs->trso_type = TR_RAID1_NONE;
	trs->trso_recover_slabs = 0;
	trs->trso_failed_sd = NULL;
	g_raid_tr_update_state_raid1(vol, NULL);
}

static void
g_raid_tr_raid1_rebuild_finish(struct g_raid_tr_object *tr)
{
	struct g_raid_tr_raid1_object *trs;
	struct g_raid_subdisk *sd;

	trs = (struct g_raid_tr_raid1_object *)tr;
	sd = trs->trso_failed_sd;
	G_RAID_DEBUG1(0, tr->tro_volume->v_softc,
	    "Subdisk %s:%d-%s rebuild completed.",
	    sd->sd_volume->v_name, sd->sd_pos,
	    sd->sd_disk ? g_raid_get_diskname(sd->sd_disk) : "[none]");
	g_raid_change_subdisk_state(sd, G_RAID_SUBDISK_S_ACTIVE);
	sd->sd_rebuild_pos = 0;
	g_raid_tr_raid1_rebuild_done(trs);
}

static void
g_raid_tr_raid1_rebuild_abort(struct g_raid_tr_object *tr)
{
	struct g_raid_tr_raid1_object *trs;
	struct g_raid_subdisk *sd;
	struct g_raid_volume *vol;
	off_t len;

	vol = tr->tro_volume;
	trs = (struct g_raid_tr_raid1_object *)tr;
	sd = trs->trso_failed_sd;
	if (trs->trso_flags & TR_RAID1_F_DOING_SOME) {
		G_RAID_DEBUG1(1, vol->v_softc,
		    "Subdisk %s:%d-%s rebuild is aborting.",
		    sd->sd_volume->v_name, sd->sd_pos,
		    sd->sd_disk ? g_raid_get_diskname(sd->sd_disk) : "[none]");
		trs->trso_flags |= TR_RAID1_F_ABORT;
	} else {
		G_RAID_DEBUG1(0, vol->v_softc,
		    "Subdisk %s:%d-%s rebuild aborted.",
		    sd->sd_volume->v_name, sd->sd_pos,
		    sd->sd_disk ? g_raid_get_diskname(sd->sd_disk) : "[none]");
		trs->trso_flags &= ~TR_RAID1_F_ABORT;
		if (trs->trso_flags & TR_RAID1_F_LOCKED) {
			trs->trso_flags &= ~TR_RAID1_F_LOCKED;
			len = MIN(g_raid1_rebuild_slab,
			    sd->sd_size - sd->sd_rebuild_pos);
			g_raid_unlock_range(tr->tro_volume,
			    sd->sd_rebuild_pos, len);
		}
		g_raid_tr_raid1_rebuild_done(trs);
	}
}

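/*
 * Pick the subdisk to reconstruct and start the rebuild. An in-progress
 * RESYNC or REBUILD subdisk is resumed first; otherwise a STALE subdisk
 * is restarted as RESYNC from position 0, and an UNINITIALIZED or NEW
 * subdisk is rebuilt from scratch.
 */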
static void
g_raid_tr_raid1_rebuild_start(struct g_raid_tr_object *tr)
{
	struct g_raid_volume *vol;
	struct g_raid_tr_raid1_object *trs;
	struct g_raid_subdisk *sd, *fsd;

	vol = tr->tro_volume;
	trs = (struct g_raid_tr_raid1_object *)tr;
	if (trs->trso_failed_sd) {
		G_RAID_DEBUG1(1, vol->v_softc,
357 "Already rebuild in start rebuild. pos %jd\n",
		    (intmax_t)trs->trso_failed_sd->sd_rebuild_pos);
		return;
	}
	sd = g_raid_get_subdisk(vol, G_RAID_SUBDISK_S_ACTIVE);
	if (sd == NULL) {
		G_RAID_DEBUG1(1, vol->v_softc,
		    "No active disk to rebuild. night night.");
		return;
	}
	fsd = g_raid_get_subdisk(vol, G_RAID_SUBDISK_S_RESYNC);
	if (fsd == NULL)
		fsd = g_raid_get_subdisk(vol, G_RAID_SUBDISK_S_REBUILD);
	if (fsd == NULL) {
		fsd = g_raid_get_subdisk(vol, G_RAID_SUBDISK_S_STALE);
		if (fsd != NULL) {
			fsd->sd_rebuild_pos = 0;
			g_raid_change_subdisk_state(fsd,
			    G_RAID_SUBDISK_S_RESYNC);
			g_raid_write_metadata(vol->v_softc, vol, fsd, NULL);
		} else {
			fsd = g_raid_get_subdisk(vol,
			    G_RAID_SUBDISK_S_UNINITIALIZED);
			if (fsd == NULL)
				fsd = g_raid_get_subdisk(vol,
				    G_RAID_SUBDISK_S_NEW);
			if (fsd != NULL) {
				fsd->sd_rebuild_pos = 0;
				g_raid_change_subdisk_state(fsd,
				    G_RAID_SUBDISK_S_REBUILD);
				g_raid_write_metadata(vol->v_softc,
				    vol, fsd, NULL);
			}
		}
	}
	if (fsd == NULL) {
		G_RAID_DEBUG1(1, vol->v_softc,
		    "No failed disk to rebuild. night night.");
		return;
	}
	trs->trso_failed_sd = fsd;
	G_RAID_DEBUG1(0, vol->v_softc,
	    "Subdisk %s:%d-%s rebuild start at %jd.",
	    fsd->sd_volume->v_name, fsd->sd_pos,
	    fsd->sd_disk ? g_raid_get_diskname(fsd->sd_disk) : "[none]",
	    (intmax_t)trs->trso_failed_sd->sd_rebuild_pos);
	trs->trso_type = TR_RAID1_REBUILD;
	trs->trso_buffer = malloc(g_raid1_rebuild_slab, M_TR_RAID1, M_WAITOK);
	trs->trso_meta_update = g_raid1_rebuild_meta_update;
	g_raid_tr_raid1_rebuild_some(tr);
}

static void
g_raid_tr_raid1_maybe_rebuild(struct g_raid_tr_object *tr,
    struct g_raid_subdisk *sd)
{
	struct g_raid_volume *vol;
	struct g_raid_tr_raid1_object *trs;
	int na, nr;

	/*
	 * If we're stopping, don't do anything. If we don't have at least one
	 * good disk and one bad disk, we don't do anything. And if there's a
	 * 'good disk' stored in the trs, then we're in progress and we punt.
	 * If we make it past all these checks, we need to rebuild.
	 */
	vol = tr->tro_volume;
	trs = (struct g_raid_tr_raid1_object *)tr;
	if (trs->trso_stopping)
		return;
	na = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_ACTIVE);
	nr = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_REBUILD) +
	    g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_RESYNC);
	switch (trs->trso_type) {
	case TR_RAID1_NONE:
		if (na == 0)
			return;
		if (nr == 0) {
			nr = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_NEW) +
			    g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_STALE) +
			    g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_UNINITIALIZED);
			if (nr == 0)
				return;
		}
		g_raid_tr_raid1_rebuild_start(tr);
		break;
	case TR_RAID1_REBUILD:
		if (na == 0 || nr == 0 || trs->trso_failed_sd == sd)
			g_raid_tr_raid1_rebuild_abort(tr);
		break;
	case TR_RAID1_RESYNC:
		break;
	}
}

static int
g_raid_tr_event_raid1(struct g_raid_tr_object *tr,
    struct g_raid_subdisk *sd, u_int event)
{

	g_raid_tr_update_state_raid1(tr->tro_volume, sd);
	return (0);
}

static int
g_raid_tr_start_raid1(struct g_raid_tr_object *tr)
{
	struct g_raid_tr_raid1_object *trs;
	struct g_raid_volume *vol;

	trs = (struct g_raid_tr_raid1_object *)tr;
	vol = tr->tro_volume;
	trs->trso_starting = 0;
	g_raid_tr_update_state_raid1(vol, NULL);
	return (0);
}

static int
g_raid_tr_stop_raid1(struct g_raid_tr_object *tr)
{
	struct g_raid_tr_raid1_object *trs;
	struct g_raid_volume *vol;

	trs = (struct g_raid_tr_raid1_object *)tr;
	vol = tr->tro_volume;
	trs->trso_starting = 0;
	trs->trso_stopping = 1;
	g_raid_tr_update_state_raid1(vol, NULL);
	return (0);
}

/*
 * Select the disk to read from. Take into account: subdisk state, running
 * error recovery, average disk load, head position and possible cache hits.
 */
#define ABS(x)		(((x) >= 0) ? (x) : (-(x)))
static struct g_raid_subdisk *
g_raid_tr_raid1_select_read_disk(struct g_raid_volume *vol, struct bio *bp,
    u_int mask)
{
	struct g_raid_subdisk *sd, *best;
	int i, prio, bestprio;

	best = NULL;
	bestprio = INT_MAX;
	for (i = 0; i < vol->v_disks_count; i++) {
		sd = &vol->v_subdisks[i];
		if (sd->sd_state != G_RAID_SUBDISK_S_ACTIVE &&
		    ((sd->sd_state != G_RAID_SUBDISK_S_REBUILD &&
		      sd->sd_state != G_RAID_SUBDISK_S_RESYNC) ||
		     bp->bio_offset + bp->bio_length > sd->sd_rebuild_pos))
			continue;
		if ((mask & (1 << i)) != 0)
			continue;
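		/*
		 * Compose the priority; lower wins. Subdisks running error
		 * recovery weigh heaviest (bits 22 and up), then the state's
		 * distance from ACTIVE (bits 16 and up), with the current
		 * load as the tie-breaker.
		 */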
		prio = G_RAID_SUBDISK_LOAD(sd);
		prio += min(sd->sd_recovery, 255) << 22;
		prio += (G_RAID_SUBDISK_S_ACTIVE - sd->sd_state) << 16;
		/* If disk head is precisely in position - highly prefer it. */
		if (G_RAID_SUBDISK_POS(sd) == bp->bio_offset)
			prio -= 2 * G_RAID_SUBDISK_LOAD_SCALE;
		else
		/* If disk head is close to position - prefer it. */
		if (ABS(G_RAID_SUBDISK_POS(sd) - bp->bio_offset) <
		    G_RAID_SUBDISK_TRACK_SIZE)
			prio -= 1 * G_RAID_SUBDISK_LOAD_SCALE;
		if (prio < bestprio) {
			best = sd;
			bestprio = prio;
		}
	}
	return (best);
}

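/*
 * Start a read: route the whole request to the single best mirror leg
 * as chosen by g_raid_tr_raid1_select_read_disk().
 */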
static void
g_raid_tr_iostart_raid1_read(struct g_raid_tr_object *tr, struct bio *bp)
{
	struct g_raid_subdisk *sd;
	struct bio *cbp;

	sd = g_raid_tr_raid1_select_read_disk(tr->tro_volume, bp, 0);
	KASSERT(sd != NULL, ("No active disks in volume %s.",
	    tr->tro_volume->v_name));

	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		g_raid_iodone(bp, ENOMEM);
		return;
	}

	g_raid_subdisk_iostart(sd, cbp);
}

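/*
 * Start a write or delete: clone the request to every subdisk that is
 * currently writable (ACTIVE, STALE/RESYNC, or the already-rebuilt part
 * of a REBUILD subdisk).
 */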
static void
g_raid_tr_iostart_raid1_write(struct g_raid_tr_object *tr, struct bio *bp)
{
	struct g_raid_volume *vol;
	struct g_raid_subdisk *sd;
	struct bio_queue_head queue;
	struct bio *cbp;
	int i;

	vol = tr->tro_volume;

	/*
	 * Allocate all bios before sending any request, so we can return
	 * ENOMEM in a nice and clean way.
	 */
	bioq_init(&queue);
	for (i = 0; i < vol->v_disks_count; i++) {
		sd = &vol->v_subdisks[i];
		switch (sd->sd_state) {
		case G_RAID_SUBDISK_S_ACTIVE:
			break;
		case G_RAID_SUBDISK_S_REBUILD:
			/*
			 * When rebuilding, only part of this subdisk is
			 * writable; the rest will be written as part of
			 * that process.
			 */
			if (bp->bio_offset >= sd->sd_rebuild_pos)
				continue;
			break;
		case G_RAID_SUBDISK_S_STALE:
		case G_RAID_SUBDISK_S_RESYNC:
			/*
			 * Resyncing still writes on the theory that the
			 * resync'd disk is very close and writing it will
			 * keep it that way better if we keep up while
			 * resyncing.
			 */
			break;
		default:
			continue;
		}
		cbp = g_clone_bio(bp);
		if (cbp == NULL)
			goto failure;
		cbp->bio_caller1 = sd;
		bioq_insert_tail(&queue, cbp);
	}
	while ((cbp = bioq_takefirst(&queue)) != NULL) {
		sd = cbp->bio_caller1;
		cbp->bio_caller1 = NULL;
		g_raid_subdisk_iostart(sd, cbp);
	}
	return;
failure:
	while ((cbp = bioq_takefirst(&queue)) != NULL)
		g_destroy_bio(cbp);
	if (bp->bio_error == 0)
		bp->bio_error = ENOMEM;
	g_raid_iodone(bp, bp->bio_error);
}

static void
g_raid_tr_iostart_raid1(struct g_raid_tr_object *tr, struct bio *bp)
{
	struct g_raid_volume *vol;
	struct g_raid_tr_raid1_object *trs;

	vol = tr->tro_volume;
	trs = (struct g_raid_tr_raid1_object *)tr;
	if (vol->v_state != G_RAID_VOLUME_S_OPTIMAL &&
	    vol->v_state != G_RAID_VOLUME_S_SUBOPTIMAL &&
	    vol->v_state != G_RAID_VOLUME_S_DEGRADED) {
		g_raid_iodone(bp, EIO);
		return;
	}
	/*
	 * If we're rebuilding, squeeze in rebuild activity every so often,
	 * even when the disk is busy. Be sure to only count real I/O
	 * to the disk. All 'SPECIAL' I/O is traffic generated to the disk
	 * by this module.
	 */
	if (trs->trso_failed_sd != NULL &&
	    !(bp->bio_cflags & G_RAID_BIO_FLAG_SPECIAL)) {
		/* Cut this round short, whether new or already running. */
		trs->trso_recover_slabs = 0;
		if (--trs->trso_fair_io <= 0) {
			trs->trso_fair_io = g_raid1_rebuild_fair_io;
			g_raid_tr_raid1_rebuild_some(tr);
		}
	}
	switch (bp->bio_cmd) {
	case BIO_READ:
		g_raid_tr_iostart_raid1_read(tr, bp);
		break;
	case BIO_WRITE:
	case BIO_DELETE:
		g_raid_tr_iostart_raid1_write(tr, bp);
		break;
	case BIO_FLUSH:
		g_raid_tr_flush_common(tr, bp);
		break;
	default:
		KASSERT(1 == 0, ("Invalid command here: %u (volume=%s)",
		    bp->bio_cmd, vol->v_name));
		break;
	}
}

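/*
 * Completion handler. SYNC-flagged bios belong to the rebuild pipeline:
 * a finished read is resubmitted as a write to the failed subdisk, and a
 * finished write advances the rebuild position and schedules the next
 * slab. For regular reads, a failure is retried on another mirror leg
 * and, if the retry succeeds, the recovered data is written back to the
 * failing drive to remap the bad sector.
 */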
static void
g_raid_tr_iodone_raid1(struct g_raid_tr_object *tr,
    struct g_raid_subdisk *sd, struct bio *bp)
{
	struct bio *cbp;
	struct g_raid_subdisk *nsd;
	struct g_raid_volume *vol;
	struct bio *pbp;
	struct g_raid_tr_raid1_object *trs;
	uintptr_t *mask;
	int error, do_write;

	trs = (struct g_raid_tr_raid1_object *)tr;
	vol = tr->tro_volume;
	if (bp->bio_cflags & G_RAID_BIO_FLAG_SYNC) {
		/*
		 * This operation is part of a rebuild or resync operation.
		 * See what work just got done, then schedule the next bit of
		 * work, if any. Rebuild/resync is done a little bit at a
		 * time. Either when a timeout happens, or after we get a
		 * bunch of I/Os to the disk (to make sure an active system
		 * will complete in a sane amount of time).
		 *
		 * We are set up to do differing amounts of work for each of
		 * these cases. So long as the slabs are smallish (less than
		 * 50 or so, I'd guess, but that's just a WAG), we shouldn't
		 * have any bio starvation issues. For active disks, we do
		 * 5MB of data, for inactive ones, we do 50MB.
		 */
		if (trs->trso_type == TR_RAID1_REBUILD) {
			if (bp->bio_cmd == BIO_READ) {

				/* Immediately abort rebuild, if requested. */
				if (trs->trso_flags & TR_RAID1_F_ABORT) {
					trs->trso_flags &= ~TR_RAID1_F_DOING_SOME;
					g_raid_tr_raid1_rebuild_abort(tr);
					return;
				}

				/* On read error, skip and cross fingers. */
				if (bp->bio_error != 0) {
					G_RAID_LOGREQ(0, bp,
					    "Read error during rebuild (%d), "
					    "possible data loss!",
					    bp->bio_error);
					goto rebuild_round_done;
				}

				/*
				 * The read operation finished, queue the
				 * write and get out.
				 */
				G_RAID_LOGREQ(4, bp, "rebuild read done. %d",
				    bp->bio_error);
				bp->bio_cmd = BIO_WRITE;
				bp->bio_cflags = G_RAID_BIO_FLAG_SYNC;
				G_RAID_LOGREQ(4, bp, "Queueing rebuild write.");
				g_raid_subdisk_iostart(trs->trso_failed_sd, bp);
			} else {
				/*
				 * The write operation just finished. Do
				 * another. We keep cloning the master bio
				 * since it has the right buffers allocated to
				 * it.
				 */
				G_RAID_LOGREQ(4, bp,
				    "rebuild write done. Error %d",
				    bp->bio_error);
				nsd = trs->trso_failed_sd;
				if (bp->bio_error != 0 ||
				    trs->trso_flags & TR_RAID1_F_ABORT) {
					if ((trs->trso_flags &
					    TR_RAID1_F_ABORT) == 0) {
						g_raid_tr_raid1_fail_disk(sd->sd_softc,
						    nsd, nsd->sd_disk);
					}
					trs->trso_flags &= ~TR_RAID1_F_DOING_SOME;
					g_raid_tr_raid1_rebuild_abort(tr);
					return;
				}
rebuild_round_done:
				nsd = trs->trso_failed_sd;
				trs->trso_flags &= ~TR_RAID1_F_LOCKED;
				g_raid_unlock_range(sd->sd_volume,
				    bp->bio_offset, bp->bio_length);
				nsd->sd_rebuild_pos += bp->bio_length;
				if (nsd->sd_rebuild_pos >= nsd->sd_size) {
					g_raid_tr_raid1_rebuild_finish(tr);
					return;
				}

				/* Abort rebuild if we are stopping */
				if (trs->trso_stopping) {
					trs->trso_flags &= ~TR_RAID1_F_DOING_SOME;
					g_raid_tr_raid1_rebuild_abort(tr);
					return;
				}

				if (--trs->trso_meta_update <= 0) {
					g_raid_write_metadata(vol->v_softc,
					    vol, nsd, nsd->sd_disk);
					trs->trso_meta_update =
					    g_raid1_rebuild_meta_update;
				}
				trs->trso_flags &= ~TR_RAID1_F_DOING_SOME;
				if (--trs->trso_recover_slabs <= 0)
					return;
				g_raid_tr_raid1_rebuild_some(tr);
			}
		} else if (trs->trso_type == TR_RAID1_RESYNC) {
			/*
			 * Read good sd, read bad sd in parallel. When both
			 * are done, compare the buffers. Write good to the
			 * bad if different. Do the next bit of work.
			 */
			panic("Somehow, we think we're doing a resync");
		}
		return;
	}
	pbp = bp->bio_parent;
	pbp->bio_inbed++;
	if (bp->bio_cmd == BIO_READ && bp->bio_error != 0) {
		/*
		 * Read failed on first drive. Retry the read error on
		 * another disk drive, if available, before erroring out the
		 * read.
		 */
		sd->sd_disk->d_read_errs++;
		G_RAID_LOGREQ(0, bp,
		    "Read error (%d), %d read errors total",
		    bp->bio_error, sd->sd_disk->d_read_errs);

		/*
		 * If there are too many read errors, we move to degraded.
		 * XXX Do we want to FAIL the drive (eg, make the user redo
		 * everything to get it back in sync), or just degrade the
		 * drive, which kicks off a resync?
		 */
		do_write = 1;
		if (sd->sd_disk->d_read_errs > g_raid_read_err_thresh) {
			g_raid_tr_raid1_fail_disk(sd->sd_softc, sd, sd->sd_disk);
			if (pbp->bio_children == 1)
				do_write = 0;
		}

		/*
		 * Find the other disk, and try to do the I/O to it.
		 */
		mask = (uintptr_t *)(&pbp->bio_driver2);
		if (pbp->bio_children == 1) {
			/* Save original subdisk. */
			pbp->bio_driver1 = do_write ? sd : NULL;
			*mask = 0;
		}
		*mask |= 1 << sd->sd_pos;
		nsd = g_raid_tr_raid1_select_read_disk(vol, pbp, *mask);
		if (nsd != NULL && (cbp = g_clone_bio(pbp)) != NULL) {
			g_destroy_bio(bp);
			G_RAID_LOGREQ(2, cbp, "Retrying read from %d",
			    nsd->sd_pos);
			if (pbp->bio_children == 2 && do_write) {
				sd->sd_recovery++;
				cbp->bio_caller1 = nsd;
				pbp->bio_pflags = G_RAID_BIO_FLAG_LOCKED;
				/* Lock callback starts I/O */
				g_raid_lock_range(sd->sd_volume,
				    cbp->bio_offset, cbp->bio_length, pbp, cbp);
			} else {
				g_raid_subdisk_iostart(nsd, cbp);
			}
			return;
		}
		/*
		 * We can't retry. Return the original error by falling
		 * through. This will happen when there's only one good disk.
		 * We don't need to fail the raid, since its actual state is
		 * based on the state of the subdisks.
		 */
		G_RAID_LOGREQ(2, bp, "Couldn't retry read, failing it");
	}
	if (bp->bio_cmd == BIO_READ &&
	    bp->bio_error == 0 &&
	    pbp->bio_children > 1 &&
	    pbp->bio_driver1 != NULL) {
		/*
		 * If it was a read, and bio_children is >1, then we just
		 * recovered the data from the second drive. We should try to
		 * write that data to the first drive if sector remapping is
		 * enabled. A write should put the data in a new place on the
		 * disk, remapping the bad sector. Do we need to do that by
		 * queueing a request to the main worker thread? It doesn't
		 * affect the return code of this current read, and can be
		 * done at our leisure. However, to make the code simpler, it
		 * is done synchronously.
		 */
		G_RAID_LOGREQ(3, bp, "Recovered data from other drive");
		cbp = g_clone_bio(pbp);
		if (cbp != NULL) {
			g_destroy_bio(bp);
			cbp->bio_cmd = BIO_WRITE;
			cbp->bio_cflags = G_RAID_BIO_FLAG_REMAP;
			G_RAID_LOGREQ(2, cbp,
			    "Attempting bad sector remap on failing drive.");
			g_raid_subdisk_iostart(pbp->bio_driver1, cbp);
			return;
		}
	}
	if (pbp->bio_pflags & G_RAID_BIO_FLAG_LOCKED) {
		/*
		 * We're done with a recovery, mark the range as unlocked.
		 * For any write errors, we aggressively fail the disk since
		 * there was both a READ and a WRITE error at this location.
		 * Both types of errors generally indicate the drive is on
		 * the verge of total failure anyway. Better to stop trusting
		 * it now. However, we need to reset error to 0 in that case
		 * because we're not failing the original I/O which succeeded.
		 */
		if (bp->bio_cmd == BIO_WRITE && bp->bio_error) {
			G_RAID_LOGREQ(0, bp, "Remap write failed: "
			    "failing subdisk.");
			g_raid_tr_raid1_fail_disk(sd->sd_softc, sd, sd->sd_disk);
			bp->bio_error = 0;
		}
		if (pbp->bio_driver1 != NULL) {
			((struct g_raid_subdisk *)pbp->bio_driver1)
			    ->sd_recovery--;
		}
		G_RAID_LOGREQ(2, bp, "REMAP done %d.", bp->bio_error);
		g_raid_unlock_range(sd->sd_volume, bp->bio_offset,
		    bp->bio_length);
	}
	if (pbp->bio_cmd != BIO_READ) {
		if (pbp->bio_inbed == 1 || pbp->bio_error != 0)
			pbp->bio_error = bp->bio_error;
		if (pbp->bio_cmd == BIO_WRITE && bp->bio_error != 0) {
			G_RAID_LOGREQ(0, bp, "Write failed: failing subdisk.");
			g_raid_tr_raid1_fail_disk(sd->sd_softc, sd, sd->sd_disk);
		}
		error = pbp->bio_error;
	} else
		error = bp->bio_error;
	g_destroy_bio(bp);
	if (pbp->bio_children == pbp->bio_inbed) {
		pbp->bio_completed = pbp->bio_length;
		g_raid_iodone(pbp, error);
	}
}

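/*
 * Mirror a kernel crash-dump write to every writable subdisk, using the
 * same per-state rules as the regular write path. Report success if at
 * least one leg took the data.
 */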
static int
g_raid_tr_kerneldump_raid1(struct g_raid_tr_object *tr,
    void *virtual, vm_offset_t physical, off_t offset, size_t length)
{
	struct g_raid_volume *vol;
	struct g_raid_subdisk *sd;
	int error, i, ok;

	vol = tr->tro_volume;
	error = 0;
	ok = 0;
	for (i = 0; i < vol->v_disks_count; i++) {
		sd = &vol->v_subdisks[i];
		switch (sd->sd_state) {
		case G_RAID_SUBDISK_S_ACTIVE:
			break;
		case G_RAID_SUBDISK_S_REBUILD:
			/*
			 * When rebuilding, only part of this subdisk is
			 * writable; the rest will be written as part of
			 * that process.
			 */
			if (offset >= sd->sd_rebuild_pos)
				continue;
			break;
		case G_RAID_SUBDISK_S_STALE:
		case G_RAID_SUBDISK_S_RESYNC:
			/*
			 * Resyncing still writes on the theory that the
			 * resync'd disk is very close and writing it will
			 * keep it that way better if we keep up while
			 * resyncing.
			 */
			break;
		default:
			continue;
		}
		error = g_raid_subdisk_kerneldump(sd,
		    virtual, physical, offset, length);
		if (error == 0)
			ok++;
	}
	return (ok > 0 ? 0 : error);
}

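/*
 * Range-lock callback: the lock requested by the rebuild or recovery
 * path is now held, so start the deferred bio on the subdisk stashed
 * in its bio_caller1.
 */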
static int
g_raid_tr_locked_raid1(struct g_raid_tr_object *tr, void *argp)
{
	struct bio *bp;
	struct g_raid_subdisk *sd;

	bp = (struct bio *)argp;
	sd = (struct g_raid_subdisk *)bp->bio_caller1;
	g_raid_subdisk_iostart(sd, bp);

	return (0);
}

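/*
 * The volume went idle: refresh the fair-share counters and, if a
 * rebuild is pending, let it run for a larger burst of slabs.
 */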
static int
g_raid_tr_idle_raid1(struct g_raid_tr_object *tr)
{
	struct g_raid_tr_raid1_object *trs;

	trs = (struct g_raid_tr_raid1_object *)tr;
	trs->trso_fair_io = g_raid1_rebuild_fair_io;
	trs->trso_recover_slabs = g_raid1_rebuild_cluster_idle;
	if (trs->trso_type == TR_RAID1_REBUILD)
		g_raid_tr_raid1_rebuild_some(tr);
	return (0);
}

static int
g_raid_tr_free_raid1(struct g_raid_tr_object *tr)
{
	struct g_raid_tr_raid1_object *trs;

	trs = (struct g_raid_tr_raid1_object *)tr;

	if (trs->trso_buffer != NULL) {
		free(trs->trso_buffer, M_TR_RAID1);
		trs->trso_buffer = NULL;
	}
	return (0);
}

G_RAID_TR_DECLARE(raid1, "RAID1");