/*-
 * Copyright (c) 2010 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/11.2/sys/geom/raid/tr_raid1.c 298808 2016-04-29 20:56:58Z pfg $");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/kobj.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <geom/geom.h>
#include "geom/raid/g_raid.h"
#include "g_raid_tr_if.h"

SYSCTL_DECL(_kern_geom_raid_raid1);

#define RAID1_REBUILD_SLAB	(1 << 20) /* One transaction in a rebuild */
static int g_raid1_rebuild_slab = RAID1_REBUILD_SLAB;
SYSCTL_UINT(_kern_geom_raid_raid1, OID_AUTO, rebuild_slab_size, CTLFLAG_RWTUN,
    &g_raid1_rebuild_slab, 0,
    "Amount of the disk to rebuild each read/write cycle of the rebuild.");

#define RAID1_REBUILD_FAIR_IO	20 /* use 1/x of the available I/O */
static int g_raid1_rebuild_fair_io = RAID1_REBUILD_FAIR_IO;
SYSCTL_UINT(_kern_geom_raid_raid1, OID_AUTO, rebuild_fair_io, CTLFLAG_RWTUN,
    &g_raid1_rebuild_fair_io, 0,
    "Fraction of the I/O bandwidth to use when the disk is busy for rebuild.");

#define RAID1_REBUILD_CLUSTER_IDLE	100
static int g_raid1_rebuild_cluster_idle = RAID1_REBUILD_CLUSTER_IDLE;
SYSCTL_UINT(_kern_geom_raid_raid1, OID_AUTO, rebuild_cluster_idle, CTLFLAG_RWTUN,
    &g_raid1_rebuild_cluster_idle, 0,
    "Number of slabs to do each time we trigger a rebuild cycle.");

#define RAID1_REBUILD_META_UPDATE	1024 /* update metadata every 1GB or so */
static int g_raid1_rebuild_meta_update = RAID1_REBUILD_META_UPDATE;
SYSCTL_UINT(_kern_geom_raid_raid1, OID_AUTO, rebuild_meta_update, CTLFLAG_RWTUN,
    &g_raid1_rebuild_meta_update, 0,
    "When to update the metadata.");

static MALLOC_DEFINE(M_TR_RAID1, "tr_raid1_data", "GEOM_RAID RAID1 data");

#define TR_RAID1_NONE		0
#define TR_RAID1_REBUILD	1
#define TR_RAID1_RESYNC		2

#define TR_RAID1_F_DOING_SOME	0x1	/* A rebuild I/O cycle is in flight. */
#define TR_RAID1_F_LOCKED	0x2	/* The rebuild range is locked. */
#define TR_RAID1_F_ABORT	0x4	/* An abort has been requested. */

struct g_raid_tr_raid1_object {
	struct g_raid_tr_object trso_base;
	int trso_starting;
	int trso_stopping;
	int trso_type;			/* TR_RAID1_{NONE,REBUILD,RESYNC} */
	int trso_recover_slabs;		/* slabs before rest */
	int trso_fair_io;		/* Regular I/Os before next rebuild cycle. */
	int trso_meta_update;		/* Slabs left before a metadata update. */
	int trso_flags;
	struct g_raid_subdisk *trso_failed_sd; /* like per volume */
	void *trso_buffer;		/* Buffer space */
	struct bio trso_bio;		/* Preallocated rebuild/resync bio. */
};

static g_raid_tr_taste_t g_raid_tr_taste_raid1;
static g_raid_tr_event_t g_raid_tr_event_raid1;
static g_raid_tr_start_t g_raid_tr_start_raid1;
static g_raid_tr_stop_t g_raid_tr_stop_raid1;
static g_raid_tr_iostart_t g_raid_tr_iostart_raid1;
static g_raid_tr_iodone_t g_raid_tr_iodone_raid1;
static g_raid_tr_kerneldump_t g_raid_tr_kerneldump_raid1;
static g_raid_tr_locked_t g_raid_tr_locked_raid1;
static g_raid_tr_idle_t g_raid_tr_idle_raid1;
static g_raid_tr_free_t g_raid_tr_free_raid1;

static kobj_method_t g_raid_tr_raid1_methods[] = {
	KOBJMETHOD(g_raid_tr_taste, g_raid_tr_taste_raid1),
	KOBJMETHOD(g_raid_tr_event, g_raid_tr_event_raid1),
	KOBJMETHOD(g_raid_tr_start, g_raid_tr_start_raid1),
	KOBJMETHOD(g_raid_tr_stop, g_raid_tr_stop_raid1),
	KOBJMETHOD(g_raid_tr_iostart, g_raid_tr_iostart_raid1),
	KOBJMETHOD(g_raid_tr_iodone, g_raid_tr_iodone_raid1),
	KOBJMETHOD(g_raid_tr_kerneldump, g_raid_tr_kerneldump_raid1),
	KOBJMETHOD(g_raid_tr_locked, g_raid_tr_locked_raid1),
	KOBJMETHOD(g_raid_tr_idle, g_raid_tr_idle_raid1),
	KOBJMETHOD(g_raid_tr_free, g_raid_tr_free_raid1),
	{ 0, 0 }
};

static struct g_raid_tr_class g_raid_tr_raid1_class = {
	"RAID1",
	g_raid_tr_raid1_methods,
	sizeof(struct g_raid_tr_raid1_object),
	.trc_enable = 1,
	.trc_priority = 100,
	.trc_accept_unmapped = 1
};

static void g_raid_tr_raid1_rebuild_abort(struct g_raid_tr_object *tr);
static void g_raid_tr_raid1_maybe_rebuild(struct g_raid_tr_object *tr,
    struct g_raid_subdisk *sd);

static int
g_raid_tr_taste_raid1(struct g_raid_tr_object *tr, struct g_raid_volume *vol)
{
	struct g_raid_tr_raid1_object *trs;

	trs = (struct g_raid_tr_raid1_object *)tr;
	if (tr->tro_volume->v_raid_level != G_RAID_VOLUME_RL_RAID1 ||
	    (tr->tro_volume->v_raid_level_qualifier != G_RAID_VOLUME_RLQ_R1SM &&
	     tr->tro_volume->v_raid_level_qualifier != G_RAID_VOLUME_RLQ_R1MM))
		return (G_RAID_TR_TASTE_FAIL);
	trs->trso_starting = 1;
	return (G_RAID_TR_TASTE_SUCCEED);
}

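/*
 * Recompute the volume state from the states of its subdisks.  If no
 * subdisk is ACTIVE, the best remaining one is promoted so reads can
 * still be served, and a rebuild is kicked off when one is warranted.
 */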
static int
g_raid_tr_update_state_raid1(struct g_raid_volume *vol,
    struct g_raid_subdisk *sd)
{
	struct g_raid_tr_raid1_object *trs;
	struct g_raid_softc *sc;
	struct g_raid_subdisk *tsd, *bestsd;
	u_int s;
	int i, na, ns;

	sc = vol->v_softc;
	trs = (struct g_raid_tr_raid1_object *)vol->v_tr;
	if (trs->trso_stopping &&
	    (trs->trso_flags & TR_RAID1_F_DOING_SOME) == 0)
		s = G_RAID_VOLUME_S_STOPPED;
	else if (trs->trso_starting)
		s = G_RAID_VOLUME_S_STARTING;
	else {
		/* Make sure we have at least one ACTIVE disk. */
		na = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_ACTIVE);
		if (na == 0) {
			/*
			 * Critical situation!  We have no active disks!
			 * Choose the best disk we have to make it active.
			 */
			bestsd = &vol->v_subdisks[0];
			for (i = 1; i < vol->v_disks_count; i++) {
				tsd = &vol->v_subdisks[i];
				if (tsd->sd_state > bestsd->sd_state)
					bestsd = tsd;
				else if (tsd->sd_state == bestsd->sd_state &&
				    (tsd->sd_state == G_RAID_SUBDISK_S_REBUILD ||
				     tsd->sd_state == G_RAID_SUBDISK_S_RESYNC) &&
				    tsd->sd_rebuild_pos > bestsd->sd_rebuild_pos)
					bestsd = tsd;
			}
			if (bestsd->sd_state >= G_RAID_SUBDISK_S_UNINITIALIZED) {
				/* We found a reasonable candidate. */
				G_RAID_DEBUG1(1, sc,
				    "Promote subdisk %s:%d from %s to ACTIVE.",
				    vol->v_name, bestsd->sd_pos,
				    g_raid_subdisk_state2str(bestsd->sd_state));
				g_raid_change_subdisk_state(bestsd,
				    G_RAID_SUBDISK_S_ACTIVE);
				g_raid_write_metadata(sc,
				    vol, bestsd, bestsd->sd_disk);
			}
		}
		na = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_ACTIVE);
		ns = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_STALE) +
		    g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_RESYNC);
		if (na == vol->v_disks_count)
			s = G_RAID_VOLUME_S_OPTIMAL;
		else if (na + ns == vol->v_disks_count)
			s = G_RAID_VOLUME_S_SUBOPTIMAL;
		else if (na > 0)
			s = G_RAID_VOLUME_S_DEGRADED;
		else
			s = G_RAID_VOLUME_S_BROKEN;
		g_raid_tr_raid1_maybe_rebuild(vol->v_tr, sd);
	}
	if (s != vol->v_state) {
		g_raid_event_send(vol, G_RAID_VOLUME_S_ALIVE(s) ?
		    G_RAID_VOLUME_E_UP : G_RAID_VOLUME_E_DOWN,
		    G_RAID_EVENT_VOLUME);
		g_raid_change_volume_state(vol, s);
		if (!trs->trso_starting && !trs->trso_stopping)
			g_raid_write_metadata(sc, vol, NULL, NULL);
	}
	return (0);
}

static void
g_raid_tr_raid1_fail_disk(struct g_raid_softc *sc, struct g_raid_subdisk *sd,
    struct g_raid_disk *disk)
{
	/*
	 * We don't fail the last disk in the pack, since it still has decent
	 * data on it and that's better than failing the disk if it is the root
	 * file system.
	 *
	 * XXX should this be controlled via a tunable?  It makes sense for
	 * the volume that has / on it.  I can't think of a case where we'd
	 * want the volume to go away on this kind of event.
	 */
	if (g_raid_nsubdisks(sd->sd_volume, G_RAID_SUBDISK_S_ACTIVE) == 1 &&
	    g_raid_get_subdisk(sd->sd_volume, G_RAID_SUBDISK_S_ACTIVE) == sd)
		return;
	g_raid_fail_disk(sc, sd, disk);
}

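/*
 * Do one slab worth of rebuild work: read the next slab from an ACTIVE
 * subdisk into the preallocated buffer.  The range being copied is locked
 * first; the lock callback (g_raid_tr_locked_raid1) actually launches the
 * read, and g_raid_tr_iodone_raid1 turns it into the matching write.
 */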
static void
g_raid_tr_raid1_rebuild_some(struct g_raid_tr_object *tr)
{
	struct g_raid_tr_raid1_object *trs;
	struct g_raid_subdisk *sd, *good_sd;
	struct bio *bp;

	trs = (struct g_raid_tr_raid1_object *)tr;
	if (trs->trso_flags & TR_RAID1_F_DOING_SOME)
		return;
	sd = trs->trso_failed_sd;
	good_sd = g_raid_get_subdisk(sd->sd_volume, G_RAID_SUBDISK_S_ACTIVE);
	if (good_sd == NULL) {
		g_raid_tr_raid1_rebuild_abort(tr);
		return;
	}
	bp = &trs->trso_bio;
	memset(bp, 0, sizeof(*bp));
	bp->bio_offset = sd->sd_rebuild_pos;
	bp->bio_length = MIN(g_raid1_rebuild_slab,
	    sd->sd_size - sd->sd_rebuild_pos);
	bp->bio_data = trs->trso_buffer;
	bp->bio_cmd = BIO_READ;
	bp->bio_cflags = G_RAID_BIO_FLAG_SYNC;
	bp->bio_caller1 = good_sd;
	trs->trso_flags |= TR_RAID1_F_DOING_SOME;
	trs->trso_flags |= TR_RAID1_F_LOCKED;
	g_raid_lock_range(sd->sd_volume,	/* Lock callback starts I/O */
	    bp->bio_offset, bp->bio_length, NULL, bp);
}

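/*
 * Common completion path for a rebuild: record the final position in the
 * metadata, release the slab buffer and return the transformation object
 * to its idle state.
 */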
static void
g_raid_tr_raid1_rebuild_done(struct g_raid_tr_raid1_object *trs)
{
	struct g_raid_volume *vol;
	struct g_raid_subdisk *sd;

	vol = trs->trso_base.tro_volume;
	sd = trs->trso_failed_sd;
	g_raid_write_metadata(vol->v_softc, vol, sd, sd->sd_disk);
	free(trs->trso_buffer, M_TR_RAID1);
	trs->trso_buffer = NULL;
	trs->trso_flags &= ~TR_RAID1_F_DOING_SOME;
	trs->trso_type = TR_RAID1_NONE;
	trs->trso_recover_slabs = 0;
	trs->trso_failed_sd = NULL;
	g_raid_tr_update_state_raid1(vol, NULL);
}

static void
g_raid_tr_raid1_rebuild_finish(struct g_raid_tr_object *tr)
{
	struct g_raid_tr_raid1_object *trs;
	struct g_raid_subdisk *sd;

	trs = (struct g_raid_tr_raid1_object *)tr;
	sd = trs->trso_failed_sd;
	G_RAID_DEBUG1(0, tr->tro_volume->v_softc,
	    "Subdisk %s:%d-%s rebuild completed.",
	    sd->sd_volume->v_name, sd->sd_pos,
	    sd->sd_disk ? g_raid_get_diskname(sd->sd_disk) : "[none]");
	g_raid_change_subdisk_state(sd, G_RAID_SUBDISK_S_ACTIVE);
	sd->sd_rebuild_pos = 0;
	g_raid_tr_raid1_rebuild_done(trs);
}

static void
g_raid_tr_raid1_rebuild_abort(struct g_raid_tr_object *tr)
{
	struct g_raid_tr_raid1_object *trs;
	struct g_raid_subdisk *sd;
	struct g_raid_volume *vol;
	off_t len;

	vol = tr->tro_volume;
	trs = (struct g_raid_tr_raid1_object *)tr;
	sd = trs->trso_failed_sd;
	if (trs->trso_flags & TR_RAID1_F_DOING_SOME) {
		G_RAID_DEBUG1(1, vol->v_softc,
		    "Subdisk %s:%d-%s rebuild is aborting.",
		    sd->sd_volume->v_name, sd->sd_pos,
		    sd->sd_disk ? g_raid_get_diskname(sd->sd_disk) : "[none]");
		trs->trso_flags |= TR_RAID1_F_ABORT;
	} else {
		G_RAID_DEBUG1(0, vol->v_softc,
		    "Subdisk %s:%d-%s rebuild aborted.",
		    sd->sd_volume->v_name, sd->sd_pos,
		    sd->sd_disk ? g_raid_get_diskname(sd->sd_disk) : "[none]");
		trs->trso_flags &= ~TR_RAID1_F_ABORT;
		if (trs->trso_flags & TR_RAID1_F_LOCKED) {
			trs->trso_flags &= ~TR_RAID1_F_LOCKED;
			len = MIN(g_raid1_rebuild_slab,
			    sd->sd_size - sd->sd_rebuild_pos);
			g_raid_unlock_range(tr->tro_volume,
			    sd->sd_rebuild_pos, len);
		}
		g_raid_tr_raid1_rebuild_done(trs);
	}
}

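/*
 * Pick the subdisk to rebuild and start the first cycle.  Preference
 * order: a subdisk already marked RESYNC or REBUILD (continue where we
 * left off), then a STALE one (restarted as RESYNC from position 0),
 * then an UNINITIALIZED or NEW one (started as REBUILD from position 0).
 */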
static void
g_raid_tr_raid1_rebuild_start(struct g_raid_tr_object *tr)
{
	struct g_raid_volume *vol;
	struct g_raid_tr_raid1_object *trs;
	struct g_raid_subdisk *sd, *fsd;

	vol = tr->tro_volume;
	trs = (struct g_raid_tr_raid1_object *)tr;
	if (trs->trso_failed_sd) {
		G_RAID_DEBUG1(1, vol->v_softc,
		    "Already rebuilding in start rebuild. pos %jd\n",
		    (intmax_t)trs->trso_failed_sd->sd_rebuild_pos);
		return;
	}
	sd = g_raid_get_subdisk(vol, G_RAID_SUBDISK_S_ACTIVE);
	if (sd == NULL) {
		G_RAID_DEBUG1(1, vol->v_softc,
		    "No active disk to rebuild. night night.");
		return;
	}
	fsd = g_raid_get_subdisk(vol, G_RAID_SUBDISK_S_RESYNC);
	if (fsd == NULL)
		fsd = g_raid_get_subdisk(vol, G_RAID_SUBDISK_S_REBUILD);
	if (fsd == NULL) {
		fsd = g_raid_get_subdisk(vol, G_RAID_SUBDISK_S_STALE);
		if (fsd != NULL) {
			fsd->sd_rebuild_pos = 0;
			g_raid_change_subdisk_state(fsd,
			    G_RAID_SUBDISK_S_RESYNC);
			g_raid_write_metadata(vol->v_softc, vol, fsd, NULL);
		} else {
			fsd = g_raid_get_subdisk(vol,
			    G_RAID_SUBDISK_S_UNINITIALIZED);
			if (fsd == NULL)
				fsd = g_raid_get_subdisk(vol,
				    G_RAID_SUBDISK_S_NEW);
			if (fsd != NULL) {
				fsd->sd_rebuild_pos = 0;
				g_raid_change_subdisk_state(fsd,
				    G_RAID_SUBDISK_S_REBUILD);
				g_raid_write_metadata(vol->v_softc,
				    vol, fsd, NULL);
			}
		}
	}
	if (fsd == NULL) {
		G_RAID_DEBUG1(1, vol->v_softc,
		    "No failed disk to rebuild. night night.");
		return;
	}
	trs->trso_failed_sd = fsd;
	G_RAID_DEBUG1(0, vol->v_softc,
	    "Subdisk %s:%d-%s rebuild start at %jd.",
	    fsd->sd_volume->v_name, fsd->sd_pos,
	    fsd->sd_disk ? g_raid_get_diskname(fsd->sd_disk) : "[none]",
	    (intmax_t)trs->trso_failed_sd->sd_rebuild_pos);
	trs->trso_type = TR_RAID1_REBUILD;
	trs->trso_buffer = malloc(g_raid1_rebuild_slab, M_TR_RAID1, M_WAITOK);
	trs->trso_meta_update = g_raid1_rebuild_meta_update;
	g_raid_tr_raid1_rebuild_some(tr);
}

static void
g_raid_tr_raid1_maybe_rebuild(struct g_raid_tr_object *tr,
    struct g_raid_subdisk *sd)
{
	struct g_raid_volume *vol;
	struct g_raid_tr_raid1_object *trs;
	int na, nr;

	/*
	 * If we're stopping, don't do anything.  If we don't have at least one
	 * good disk and one bad disk, we don't do anything.  And if there's a
	 * 'good disk' stored in the trs, then we're in progress and we punt.
	 * If we make it past all these checks, we need to rebuild.
	 */
	vol = tr->tro_volume;
	trs = (struct g_raid_tr_raid1_object *)tr;
	if (trs->trso_stopping)
		return;
	na = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_ACTIVE);
	nr = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_REBUILD) +
	    g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_RESYNC);
	switch (trs->trso_type) {
	case TR_RAID1_NONE:
		if (na == 0)
			return;
		if (nr == 0) {
			nr = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_NEW) +
			    g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_STALE) +
			    g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_UNINITIALIZED);
			if (nr == 0)
				return;
		}
		g_raid_tr_raid1_rebuild_start(tr);
		break;
	case TR_RAID1_REBUILD:
		if (na == 0 || nr == 0 || trs->trso_failed_sd == sd)
			g_raid_tr_raid1_rebuild_abort(tr);
		break;
	case TR_RAID1_RESYNC:
		break;
	}
}

static int
g_raid_tr_event_raid1(struct g_raid_tr_object *tr,
    struct g_raid_subdisk *sd, u_int event)
{

	g_raid_tr_update_state_raid1(tr->tro_volume, sd);
	return (0);
}

static int
g_raid_tr_start_raid1(struct g_raid_tr_object *tr)
{
	struct g_raid_tr_raid1_object *trs;
	struct g_raid_volume *vol;

	trs = (struct g_raid_tr_raid1_object *)tr;
	vol = tr->tro_volume;
	trs->trso_starting = 0;
	g_raid_tr_update_state_raid1(vol, NULL);
	return (0);
}

static int
g_raid_tr_stop_raid1(struct g_raid_tr_object *tr)
{
	struct g_raid_tr_raid1_object *trs;
	struct g_raid_volume *vol;

	trs = (struct g_raid_tr_raid1_object *)tr;
	vol = tr->tro_volume;
	trs->trso_starting = 0;
	trs->trso_stopping = 1;
	g_raid_tr_update_state_raid1(vol, NULL);
	return (0);
}

/*
 * Select the disk to read from.  Take into account: subdisk state, running
 * error recovery, average disk load, head position and possible cache hits.
 */
#define ABS(x)		(((x) >= 0) ? (x) : (-(x)))
static struct g_raid_subdisk *
g_raid_tr_raid1_select_read_disk(struct g_raid_volume *vol, struct bio *bp,
    u_int mask)
{
	struct g_raid_subdisk *sd, *best;
	int i, prio, bestprio;

	best = NULL;
	bestprio = INT_MAX;
	for (i = 0; i < vol->v_disks_count; i++) {
		sd = &vol->v_subdisks[i];
		if (sd->sd_state != G_RAID_SUBDISK_S_ACTIVE &&
		    ((sd->sd_state != G_RAID_SUBDISK_S_REBUILD &&
		      sd->sd_state != G_RAID_SUBDISK_S_RESYNC) ||
		     bp->bio_offset + bp->bio_length > sd->sd_rebuild_pos))
			continue;
		if ((mask & (1 << i)) != 0)
			continue;
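		/*
		 * Compose the priority (lower is better): current load in
		 * the low bits, subdisk state weighted at bit 16, and the
		 * number of recovery operations in flight weighted heaviest
		 * at bit 22, so rebuilding or recovering disks lose ties.
		 */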
		prio = G_RAID_SUBDISK_LOAD(sd);
		prio += min(sd->sd_recovery, 255) << 22;
		prio += (G_RAID_SUBDISK_S_ACTIVE - sd->sd_state) << 16;
		/* If disk head is precisely in position - highly prefer it. */
		if (G_RAID_SUBDISK_POS(sd) == bp->bio_offset)
			prio -= 2 * G_RAID_SUBDISK_LOAD_SCALE;
		else
		/* If disk head is close to position - prefer it. */
		if (ABS(G_RAID_SUBDISK_POS(sd) - bp->bio_offset) <
		    G_RAID_SUBDISK_TRACK_SIZE)
			prio -= 1 * G_RAID_SUBDISK_LOAD_SCALE;
		if (prio < bestprio) {
			best = sd;
			bestprio = prio;
		}
	}
	return (best);
}

static void
g_raid_tr_iostart_raid1_read(struct g_raid_tr_object *tr, struct bio *bp)
{
	struct g_raid_subdisk *sd;
	struct bio *cbp;

	sd = g_raid_tr_raid1_select_read_disk(tr->tro_volume, bp, 0);
	KASSERT(sd != NULL, ("No active disks in volume %s.",
	    tr->tro_volume->v_name));

	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		g_raid_iodone(bp, ENOMEM);
		return;
	}

	g_raid_subdisk_iostart(sd, cbp);
}

static void
g_raid_tr_iostart_raid1_write(struct g_raid_tr_object *tr, struct bio *bp)
{
	struct g_raid_volume *vol;
	struct g_raid_subdisk *sd;
	struct bio_queue_head queue;
	struct bio *cbp;
	int i;

	vol = tr->tro_volume;

	/*
	 * Allocate all bios before sending any request, so we can return
	 * ENOMEM in a nice and clean way.
	 */
	bioq_init(&queue);
	for (i = 0; i < vol->v_disks_count; i++) {
		sd = &vol->v_subdisks[i];
		switch (sd->sd_state) {
		case G_RAID_SUBDISK_S_ACTIVE:
			break;
		case G_RAID_SUBDISK_S_REBUILD:
			/*
			 * When rebuilding, only part of this subdisk is
			 * writable, the rest will be written as part of
			 * that process.
			 */
			if (bp->bio_offset >= sd->sd_rebuild_pos)
				continue;
			break;
		case G_RAID_SUBDISK_S_STALE:
		case G_RAID_SUBDISK_S_RESYNC:
			/*
			 * We still write to a resyncing disk, on the theory
			 * that it is already very close and writing it keeps
			 * it that way while the resync catches up.
			 */
			break;
		default:
			continue;
		}
		cbp = g_clone_bio(bp);
		if (cbp == NULL)
			goto failure;
		cbp->bio_caller1 = sd;
		bioq_insert_tail(&queue, cbp);
	}
	while ((cbp = bioq_takefirst(&queue)) != NULL) {
		sd = cbp->bio_caller1;
		cbp->bio_caller1 = NULL;
		g_raid_subdisk_iostart(sd, cbp);
	}
	return;
failure:
	while ((cbp = bioq_takefirst(&queue)) != NULL)
		g_destroy_bio(cbp);
	if (bp->bio_error == 0)
		bp->bio_error = ENOMEM;
	g_raid_iodone(bp, bp->bio_error);
}

static void
g_raid_tr_iostart_raid1(struct g_raid_tr_object *tr, struct bio *bp)
{
	struct g_raid_volume *vol;
	struct g_raid_tr_raid1_object *trs;

	vol = tr->tro_volume;
	trs = (struct g_raid_tr_raid1_object *)tr;
	if (vol->v_state != G_RAID_VOLUME_S_OPTIMAL &&
	    vol->v_state != G_RAID_VOLUME_S_SUBOPTIMAL &&
	    vol->v_state != G_RAID_VOLUME_S_DEGRADED) {
		g_raid_iodone(bp, EIO);
		return;
	}
	/*
	 * If we're rebuilding, squeeze in rebuild activity every so often,
	 * even when the disk is busy.  Be sure to only count real I/O
	 * to the disk.  All 'SPECIAL' I/O is traffic generated to the disk
	 * by this module.
	 */
	if (trs->trso_failed_sd != NULL &&
	    !(bp->bio_cflags & G_RAID_BIO_FLAG_SPECIAL)) {
		/* Make this new or currently running round short. */
		trs->trso_recover_slabs = 0;
		if (--trs->trso_fair_io <= 0) {
			trs->trso_fair_io = g_raid1_rebuild_fair_io;
			g_raid_tr_raid1_rebuild_some(tr);
		}
	}
	switch (bp->bio_cmd) {
	case BIO_READ:
		g_raid_tr_iostart_raid1_read(tr, bp);
		break;
	case BIO_WRITE:
	case BIO_DELETE:
		g_raid_tr_iostart_raid1_write(tr, bp);
		break;
	case BIO_FLUSH:
		g_raid_tr_flush_common(tr, bp);
		break;
	default:
		KASSERT(1 == 0, ("Invalid command here: %u (volume=%s)",
		    bp->bio_cmd, vol->v_name));
		break;
	}
}

static void
g_raid_tr_iodone_raid1(struct g_raid_tr_object *tr,
    struct g_raid_subdisk *sd, struct bio *bp)
{
	struct bio *cbp;
	struct g_raid_subdisk *nsd;
	struct g_raid_volume *vol;
	struct bio *pbp;
	struct g_raid_tr_raid1_object *trs;
	uintptr_t *mask;
	int error, do_write;

	trs = (struct g_raid_tr_raid1_object *)tr;
	vol = tr->tro_volume;
	if (bp->bio_cflags & G_RAID_BIO_FLAG_SYNC) {
		/*
		 * This operation is part of a rebuild or resync operation.
		 * See what work just got done, then schedule the next bit of
		 * work, if any.  Rebuild/resync is done a little bit at a
		 * time.  Either when a timeout happens, or after we get a
		 * bunch of I/Os to the disk (to make sure an active system
		 * will complete in a sane amount of time).
		 *
		 * We are set up to do differing amounts of work for each of
		 * these cases.  So long as the slab count is smallish (less
		 * than 50 or so, I'd guess, but that's just a WAG), we
		 * shouldn't have any bio starvation issues.  For active
		 * disks, we do 5MB of data, for inactive ones, we do 50MB.
		 */
		if (trs->trso_type == TR_RAID1_REBUILD) {
			if (bp->bio_cmd == BIO_READ) {
				/* Immediately abort rebuild, if requested. */
				if (trs->trso_flags & TR_RAID1_F_ABORT) {
					trs->trso_flags &= ~TR_RAID1_F_DOING_SOME;
					g_raid_tr_raid1_rebuild_abort(tr);
					return;
				}

				/* On read error, skip and cross fingers. */
				if (bp->bio_error != 0) {
					G_RAID_LOGREQ(0, bp,
					    "Read error during rebuild (%d), "
					    "possible data loss!",
					    bp->bio_error);
					goto rebuild_round_done;
				}

				/*
				 * The read operation finished, queue the
				 * write and get out.
				 */
				G_RAID_LOGREQ(4, bp, "rebuild read done. %d",
				    bp->bio_error);
				bp->bio_cmd = BIO_WRITE;
				bp->bio_cflags = G_RAID_BIO_FLAG_SYNC;
				G_RAID_LOGREQ(4, bp, "Queueing rebuild write.");
				g_raid_subdisk_iostart(trs->trso_failed_sd, bp);
			} else {
				/*
				 * The write operation just finished.  Do
				 * another.  We keep cloning the master bio
				 * since it has the right buffers allocated to
				 * it.
				 */
				G_RAID_LOGREQ(4, bp,
				    "rebuild write done. Error %d",
				    bp->bio_error);
				nsd = trs->trso_failed_sd;
				if (bp->bio_error != 0 ||
				    trs->trso_flags & TR_RAID1_F_ABORT) {
					if ((trs->trso_flags &
					    TR_RAID1_F_ABORT) == 0) {
						g_raid_tr_raid1_fail_disk(sd->sd_softc,
						    nsd, nsd->sd_disk);
					}
					trs->trso_flags &= ~TR_RAID1_F_DOING_SOME;
					g_raid_tr_raid1_rebuild_abort(tr);
					return;
				}
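				/*
				 * One slab is fully copied: unlock the
				 * range, advance the rebuild position, and
				 * decide whether to continue, pause, or
				 * update the metadata.
				 */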
rebuild_round_done:
				nsd = trs->trso_failed_sd;
				trs->trso_flags &= ~TR_RAID1_F_LOCKED;
				g_raid_unlock_range(sd->sd_volume,
				    bp->bio_offset, bp->bio_length);
				nsd->sd_rebuild_pos += bp->bio_length;
				if (nsd->sd_rebuild_pos >= nsd->sd_size) {
					g_raid_tr_raid1_rebuild_finish(tr);
					return;
				}

				/* Abort rebuild if we are stopping. */
				if (trs->trso_stopping) {
					trs->trso_flags &= ~TR_RAID1_F_DOING_SOME;
					g_raid_tr_raid1_rebuild_abort(tr);
					return;
				}

				if (--trs->trso_meta_update <= 0) {
					g_raid_write_metadata(vol->v_softc,
					    vol, nsd, nsd->sd_disk);
					trs->trso_meta_update =
					    g_raid1_rebuild_meta_update;
				}
				trs->trso_flags &= ~TR_RAID1_F_DOING_SOME;
				if (--trs->trso_recover_slabs <= 0)
					return;
				g_raid_tr_raid1_rebuild_some(tr);
			}
		} else if (trs->trso_type == TR_RAID1_RESYNC) {
			/*
			 * Read the good sd and the bad sd in parallel.
			 * When both are done, compare the buffers and
			 * write the good data to the bad disk if they
			 * differ.  Then do the next bit of work.
			 */
			panic("Somehow, we think we're doing a resync");
		}
		return;
	}
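	/*
	 * Below is the completion path for regular volume I/O.  Count the
	 * child bio in; failed reads may be retried on another mirror
	 * before the parent is completed.
	 */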
	pbp = bp->bio_parent;
	pbp->bio_inbed++;
	if (bp->bio_cmd == BIO_READ && bp->bio_error != 0) {
		/*
		 * Read failed on this drive.  Retry the read on another
		 * disk drive, if available, before erroring out the read.
		 */
		sd->sd_disk->d_read_errs++;
		G_RAID_LOGREQ(0, bp,
		    "Read error (%d), %d read errors total",
		    bp->bio_error, sd->sd_disk->d_read_errs);

		/*
		 * If there are too many read errors, we move to degraded.
		 * XXX Do we want to FAIL the drive (eg, make the user redo
		 * everything to get it back in sync), or just degrade the
		 * drive, which kicks off a resync?
		 */
		do_write = 1;
		if (sd->sd_disk->d_read_errs > g_raid_read_err_thresh) {
			g_raid_tr_raid1_fail_disk(sd->sd_softc, sd, sd->sd_disk);
			if (pbp->bio_children == 1)
				do_write = 0;
		}

		/*
		 * Find the other disk, and try to do the I/O to it.  The
		 * parent bio's bio_driver2 is reused as a bitmask of the
		 * subdisks already tried.
		 */
		mask = (uintptr_t *)(&pbp->bio_driver2);
		if (pbp->bio_children == 1) {
			/* Save original subdisk. */
			pbp->bio_driver1 = do_write ? sd : NULL;
			*mask = 0;
		}
		*mask |= 1 << sd->sd_pos;
		nsd = g_raid_tr_raid1_select_read_disk(vol, pbp, *mask);
		if (nsd != NULL && (cbp = g_clone_bio(pbp)) != NULL) {
			g_destroy_bio(bp);
			G_RAID_LOGREQ(2, cbp, "Retrying read from %d",
			    nsd->sd_pos);
			if (pbp->bio_children == 2 && do_write) {
				sd->sd_recovery++;
				cbp->bio_caller1 = nsd;
				pbp->bio_pflags = G_RAID_BIO_FLAG_LOCKED;
				/* Lock callback starts I/O */
				g_raid_lock_range(sd->sd_volume,
				    cbp->bio_offset, cbp->bio_length, pbp, cbp);
			} else {
				g_raid_subdisk_iostart(nsd, cbp);
			}
			return;
		}
		/*
		 * We can't retry.  Return the original error by falling
		 * through.  This will happen when there's only one good disk.
		 * We don't need to fail the raid, since its actual state is
		 * based on the state of the subdisks.
		 */
		G_RAID_LOGREQ(2, bp, "Couldn't retry read, failing it");
	}
	if (bp->bio_cmd == BIO_READ &&
	    bp->bio_error == 0 &&
	    pbp->bio_children > 1 &&
	    pbp->bio_driver1 != NULL) {
		/*
		 * If it was a read, and bio_children is >1, then we just
		 * recovered the data from the second drive.  We should try to
		 * write that data to the first drive if sector remapping is
		 * enabled.  A write should put the data in a new place on the
		 * disk, remapping the bad sector.  Do we need to do that by
		 * queueing a request to the main worker thread?  It doesn't
		 * affect the return code of this current read, and can be
		 * done at our leisure.  However, to make the code simpler, it
		 * is done synchronously.
		 */
		G_RAID_LOGREQ(3, bp, "Recovered data from other drive");
		cbp = g_clone_bio(pbp);
		if (cbp != NULL) {
			g_destroy_bio(bp);
			cbp->bio_cmd = BIO_WRITE;
			cbp->bio_cflags = G_RAID_BIO_FLAG_REMAP;
			G_RAID_LOGREQ(2, cbp,
			    "Attempting bad sector remap on failing drive.");
			g_raid_subdisk_iostart(pbp->bio_driver1, cbp);
			return;
		}
	}
	if (pbp->bio_pflags & G_RAID_BIO_FLAG_LOCKED) {
		/*
		 * We're done with a recovery, mark the range as unlocked.
		 * For any write errors, we aggressively fail the disk since
		 * there was both a READ and a WRITE error at this location.
		 * Both types of errors generally indicate the drive is on
		 * the verge of total failure anyway.  Better to stop trusting
		 * it now.  However, we need to reset error to 0 in that case
		 * because we're not failing the original I/O which succeeded.
		 */
		if (bp->bio_cmd == BIO_WRITE && bp->bio_error) {
			G_RAID_LOGREQ(0, bp, "Remap write failed: "
			    "failing subdisk.");
			g_raid_tr_raid1_fail_disk(sd->sd_softc, sd, sd->sd_disk);
			bp->bio_error = 0;
		}
		if (pbp->bio_driver1 != NULL) {
			((struct g_raid_subdisk *)pbp->bio_driver1)
			    ->sd_recovery--;
		}
		G_RAID_LOGREQ(2, bp, "REMAP done %d.", bp->bio_error);
		g_raid_unlock_range(sd->sd_volume, bp->bio_offset,
		    bp->bio_length);
	}
	if (pbp->bio_cmd != BIO_READ) {
		if (pbp->bio_inbed == 1 || pbp->bio_error != 0)
			pbp->bio_error = bp->bio_error;
		if (pbp->bio_cmd == BIO_WRITE && bp->bio_error != 0) {
			G_RAID_LOGREQ(0, bp, "Write failed: failing subdisk.");
			g_raid_tr_raid1_fail_disk(sd->sd_softc, sd, sd->sd_disk);
		}
		error = pbp->bio_error;
	} else
		error = bp->bio_error;
	g_destroy_bio(bp);
	if (pbp->bio_children == pbp->bio_inbed) {
		pbp->bio_completed = pbp->bio_length;
		g_raid_iodone(pbp, error);
	}
}

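/*
 * Mirror a kernel dump write to every subdisk that is currently writable,
 * using the same per-state rules as the regular write path.  The dump
 * succeeds if at least one copy lands.
 */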
static int
g_raid_tr_kerneldump_raid1(struct g_raid_tr_object *tr,
    void *virtual, vm_offset_t physical, off_t offset, size_t length)
{
	struct g_raid_volume *vol;
	struct g_raid_subdisk *sd;
	int error, i, ok;

	vol = tr->tro_volume;
	error = 0;
	ok = 0;
	for (i = 0; i < vol->v_disks_count; i++) {
		sd = &vol->v_subdisks[i];
		switch (sd->sd_state) {
		case G_RAID_SUBDISK_S_ACTIVE:
			break;
		case G_RAID_SUBDISK_S_REBUILD:
			/*
			 * When rebuilding, only part of this subdisk is
			 * writable, the rest will be written as part of
			 * that process.
			 */
			if (offset >= sd->sd_rebuild_pos)
				continue;
			break;
		case G_RAID_SUBDISK_S_STALE:
		case G_RAID_SUBDISK_S_RESYNC:
			/*
			 * We still write to a resyncing disk, on the theory
			 * that it is already very close and writing it keeps
			 * it that way while the resync catches up.
			 */
			break;
		default:
			continue;
		}
		error = g_raid_subdisk_kerneldump(sd,
		    virtual, physical, offset, length);
		if (error == 0)
			ok++;
	}
	return (ok > 0 ? 0 : error);
}

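/*
 * Range-lock grant callback: the deferred bio arrives as argp with its
 * target subdisk stashed in bio_caller1; start the I/O now that the
 * range is ours.
 */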
static int
g_raid_tr_locked_raid1(struct g_raid_tr_object *tr, void *argp)
{
	struct bio *bp;
	struct g_raid_subdisk *sd;

	bp = (struct bio *)argp;
	sd = (struct g_raid_subdisk *)bp->bio_caller1;
	g_raid_subdisk_iostart(sd, bp);

	return (0);
}

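/*
 * Idle callback: the volume has gone quiet, so grant the rebuild a full
 * burst of slabs and restart copying if one is in progress.
 */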
static int
g_raid_tr_idle_raid1(struct g_raid_tr_object *tr)
{
	struct g_raid_tr_raid1_object *trs;

	trs = (struct g_raid_tr_raid1_object *)tr;
	trs->trso_fair_io = g_raid1_rebuild_fair_io;
	trs->trso_recover_slabs = g_raid1_rebuild_cluster_idle;
	if (trs->trso_type == TR_RAID1_REBUILD)
		g_raid_tr_raid1_rebuild_some(tr);
	return (0);
}

static int
g_raid_tr_free_raid1(struct g_raid_tr_object *tr)
{
	struct g_raid_tr_raid1_object *trs;

	trs = (struct g_raid_tr_raid1_object *)tr;

	if (trs->trso_buffer != NULL) {
		free(trs->trso_buffer, M_TR_RAID1);
		trs->trso_buffer = NULL;
	}
	return (0);
}

G_RAID_TR_DECLARE(raid1, "RAID1");