FreeBSD/Linux Kernel Cross Reference
sys/geom/raid/g_raid.c
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c) 2010 Alexander Motin <mav@FreeBSD.org>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD: releng/12.0/sys/geom/raid/g_raid.c 327173 2017-12-25 04:48:39Z kan $");
31
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/kernel.h>
35 #include <sys/module.h>
36 #include <sys/limits.h>
37 #include <sys/lock.h>
38 #include <sys/mutex.h>
39 #include <sys/bio.h>
40 #include <sys/sbuf.h>
41 #include <sys/sysctl.h>
42 #include <sys/malloc.h>
43 #include <sys/eventhandler.h>
44 #include <vm/uma.h>
45 #include <geom/geom.h>
46 #include <sys/proc.h>
47 #include <sys/kthread.h>
48 #include <sys/sched.h>
49 #include <geom/raid/g_raid.h>
50 #include "g_raid_md_if.h"
51 #include "g_raid_tr_if.h"
52
53 static MALLOC_DEFINE(M_RAID, "raid_data", "GEOM_RAID Data");
54
55 SYSCTL_DECL(_kern_geom);
56 SYSCTL_NODE(_kern_geom, OID_AUTO, raid, CTLFLAG_RW, 0, "GEOM_RAID stuff");
57 int g_raid_enable = 1;
58 SYSCTL_INT(_kern_geom_raid, OID_AUTO, enable, CTLFLAG_RWTUN,
59 &g_raid_enable, 0, "Enable on-disk metadata taste");
60 u_int g_raid_aggressive_spare = 0;
61 SYSCTL_UINT(_kern_geom_raid, OID_AUTO, aggressive_spare, CTLFLAG_RWTUN,
62 &g_raid_aggressive_spare, 0, "Use disks without metadata as spare");
63 u_int g_raid_debug = 0;
64 SYSCTL_UINT(_kern_geom_raid, OID_AUTO, debug, CTLFLAG_RWTUN, &g_raid_debug, 0,
65 "Debug level");
66 u_int g_raid_read_err_thresh = 10;
67 SYSCTL_UINT(_kern_geom_raid, OID_AUTO, read_err_thresh, CTLFLAG_RWTUN,
68 &g_raid_read_err_thresh, 0,
69 "Number of read errors equated to disk failure");
70 u_int g_raid_start_timeout = 30;
71 SYSCTL_UINT(_kern_geom_raid, OID_AUTO, start_timeout, CTLFLAG_RWTUN,
72 &g_raid_start_timeout, 0,
73 "Time to wait for all array components");
74 static u_int g_raid_clean_time = 5;
75 SYSCTL_UINT(_kern_geom_raid, OID_AUTO, clean_time, CTLFLAG_RWTUN,
76 &g_raid_clean_time, 0, "Mark volume as clean when idling");
77 static u_int g_raid_disconnect_on_failure = 1;
78 SYSCTL_UINT(_kern_geom_raid, OID_AUTO, disconnect_on_failure, CTLFLAG_RWTUN,
79 &g_raid_disconnect_on_failure, 0, "Disconnect component on I/O failure.");
80 static u_int g_raid_name_format = 0;
81 SYSCTL_UINT(_kern_geom_raid, OID_AUTO, name_format, CTLFLAG_RWTUN,
82 &g_raid_name_format, 0, "Provider name format.");
83 static u_int g_raid_idle_threshold = 1000000;
84 SYSCTL_UINT(_kern_geom_raid, OID_AUTO, idle_threshold, CTLFLAG_RWTUN,
85 &g_raid_idle_threshold, 1000000,
86 "Time in microseconds to consider a volume idle.");
87
88 #define MSLEEP(rv, ident, mtx, priority, wmesg, timeout) do { \
89 G_RAID_DEBUG(4, "%s: Sleeping %p.", __func__, (ident)); \
90 rv = msleep((ident), (mtx), (priority), (wmesg), (timeout)); \
91 G_RAID_DEBUG(4, "%s: Woken up %p.", __func__, (ident)); \
92 } while (0)
93
94 LIST_HEAD(, g_raid_md_class) g_raid_md_classes =
95 LIST_HEAD_INITIALIZER(g_raid_md_classes);
96
97 LIST_HEAD(, g_raid_tr_class) g_raid_tr_classes =
98 LIST_HEAD_INITIALIZER(g_raid_tr_classes);
99
100 LIST_HEAD(, g_raid_volume) g_raid_volumes =
101 LIST_HEAD_INITIALIZER(g_raid_volumes);
102
103 static eventhandler_tag g_raid_post_sync = NULL;
104 static int g_raid_started = 0;
105 static int g_raid_shutdown = 0;
106
107 static int g_raid_destroy_geom(struct gctl_req *req, struct g_class *mp,
108 struct g_geom *gp);
109 static g_taste_t g_raid_taste;
110 static void g_raid_init(struct g_class *mp);
111 static void g_raid_fini(struct g_class *mp);
112
113 struct g_class g_raid_class = {
114 .name = G_RAID_CLASS_NAME,
115 .version = G_VERSION,
116 .ctlreq = g_raid_ctl,
117 .taste = g_raid_taste,
118 .destroy_geom = g_raid_destroy_geom,
119 .init = g_raid_init,
120 .fini = g_raid_fini
121 };
122
123 static void g_raid_destroy_provider(struct g_raid_volume *vol);
124 static int g_raid_update_disk(struct g_raid_disk *disk, u_int event);
125 static int g_raid_update_subdisk(struct g_raid_subdisk *subdisk, u_int event);
126 static int g_raid_update_volume(struct g_raid_volume *vol, u_int event);
127 static int g_raid_update_node(struct g_raid_softc *sc, u_int event);
128 static void g_raid_dumpconf(struct sbuf *sb, const char *indent,
129 struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp);
130 static void g_raid_start(struct bio *bp);
131 static void g_raid_start_request(struct bio *bp);
132 static void g_raid_disk_done(struct bio *bp);
133 static void g_raid_poll(struct g_raid_softc *sc);
134
135 static const char *
136 g_raid_node_event2str(int event)
137 {
138
139 switch (event) {
140 case G_RAID_NODE_E_WAKE:
141 return ("WAKE");
142 case G_RAID_NODE_E_START:
143 return ("START");
144 default:
145 return ("INVALID");
146 }
147 }
148
149 const char *
150 g_raid_disk_state2str(int state)
151 {
152
153 switch (state) {
154 case G_RAID_DISK_S_NONE:
155 return ("NONE");
156 case G_RAID_DISK_S_OFFLINE:
157 return ("OFFLINE");
158 case G_RAID_DISK_S_DISABLED:
159 return ("DISABLED");
160 case G_RAID_DISK_S_FAILED:
161 return ("FAILED");
162 case G_RAID_DISK_S_STALE_FAILED:
163 return ("STALE_FAILED");
164 case G_RAID_DISK_S_SPARE:
165 return ("SPARE");
166 case G_RAID_DISK_S_STALE:
167 return ("STALE");
168 case G_RAID_DISK_S_ACTIVE:
169 return ("ACTIVE");
170 default:
171 return ("INVALID");
172 }
173 }
174
175 static const char *
176 g_raid_disk_event2str(int event)
177 {
178
179 switch (event) {
180 case G_RAID_DISK_E_DISCONNECTED:
181 return ("DISCONNECTED");
182 default:
183 return ("INVALID");
184 }
185 }
186
187 const char *
188 g_raid_subdisk_state2str(int state)
189 {
190
191 switch (state) {
192 case G_RAID_SUBDISK_S_NONE:
193 return ("NONE");
194 case G_RAID_SUBDISK_S_FAILED:
195 return ("FAILED");
196 case G_RAID_SUBDISK_S_NEW:
197 return ("NEW");
198 case G_RAID_SUBDISK_S_REBUILD:
199 return ("REBUILD");
200 case G_RAID_SUBDISK_S_UNINITIALIZED:
201 return ("UNINITIALIZED");
202 case G_RAID_SUBDISK_S_STALE:
203 return ("STALE");
204 case G_RAID_SUBDISK_S_RESYNC:
205 return ("RESYNC");
206 case G_RAID_SUBDISK_S_ACTIVE:
207 return ("ACTIVE");
208 default:
209 return ("INVALID");
210 }
211 }
212
213 static const char *
214 g_raid_subdisk_event2str(int event)
215 {
216
217 switch (event) {
218 case G_RAID_SUBDISK_E_NEW:
219 return ("NEW");
220 case G_RAID_SUBDISK_E_FAILED:
221 return ("FAILED");
222 case G_RAID_SUBDISK_E_DISCONNECTED:
223 return ("DISCONNECTED");
224 default:
225 return ("INVALID");
226 }
227 }
228
229 const char *
230 g_raid_volume_state2str(int state)
231 {
232
233 switch (state) {
234 case G_RAID_VOLUME_S_STARTING:
235 return ("STARTING");
236 case G_RAID_VOLUME_S_BROKEN:
237 return ("BROKEN");
238 case G_RAID_VOLUME_S_DEGRADED:
239 return ("DEGRADED");
240 case G_RAID_VOLUME_S_SUBOPTIMAL:
241 return ("SUBOPTIMAL");
242 case G_RAID_VOLUME_S_OPTIMAL:
243 return ("OPTIMAL");
244 case G_RAID_VOLUME_S_UNSUPPORTED:
245 return ("UNSUPPORTED");
246 case G_RAID_VOLUME_S_STOPPED:
247 return ("STOPPED");
248 default:
249 return ("INVALID");
250 }
251 }
252
253 static const char *
254 g_raid_volume_event2str(int event)
255 {
256
257 switch (event) {
258 case G_RAID_VOLUME_E_UP:
259 return ("UP");
260 case G_RAID_VOLUME_E_DOWN:
261 return ("DOWN");
262 case G_RAID_VOLUME_E_START:
263 return ("START");
264 case G_RAID_VOLUME_E_STARTMD:
265 return ("STARTMD");
266 default:
267 return ("INVALID");
268 }
269 }
270
271 const char *
272 g_raid_volume_level2str(int level, int qual)
273 {
274
275 switch (level) {
276 case G_RAID_VOLUME_RL_RAID0:
277 return ("RAID0");
278 case G_RAID_VOLUME_RL_RAID1:
279 return ("RAID1");
280 case G_RAID_VOLUME_RL_RAID3:
281 if (qual == G_RAID_VOLUME_RLQ_R3P0)
282 return ("RAID3-P0");
283 if (qual == G_RAID_VOLUME_RLQ_R3PN)
284 return ("RAID3-PN");
285 return ("RAID3");
286 case G_RAID_VOLUME_RL_RAID4:
287 if (qual == G_RAID_VOLUME_RLQ_R4P0)
288 return ("RAID4-P0");
289 if (qual == G_RAID_VOLUME_RLQ_R4PN)
290 return ("RAID4-PN");
291 return ("RAID4");
292 case G_RAID_VOLUME_RL_RAID5:
293 if (qual == G_RAID_VOLUME_RLQ_R5RA)
294 return ("RAID5-RA");
295 if (qual == G_RAID_VOLUME_RLQ_R5RS)
296 return ("RAID5-RS");
297 if (qual == G_RAID_VOLUME_RLQ_R5LA)
298 return ("RAID5-LA");
299 if (qual == G_RAID_VOLUME_RLQ_R5LS)
300 return ("RAID5-LS");
301 return ("RAID5");
302 case G_RAID_VOLUME_RL_RAID6:
303 if (qual == G_RAID_VOLUME_RLQ_R6RA)
304 return ("RAID6-RA");
305 if (qual == G_RAID_VOLUME_RLQ_R6RS)
306 return ("RAID6-RS");
307 if (qual == G_RAID_VOLUME_RLQ_R6LA)
308 return ("RAID6-LA");
309 if (qual == G_RAID_VOLUME_RLQ_R6LS)
310 return ("RAID6-LS");
311 return ("RAID6");
312 case G_RAID_VOLUME_RL_RAIDMDF:
313 if (qual == G_RAID_VOLUME_RLQ_RMDFRA)
314 return ("RAIDMDF-RA");
315 if (qual == G_RAID_VOLUME_RLQ_RMDFRS)
316 return ("RAIDMDF-RS");
317 if (qual == G_RAID_VOLUME_RLQ_RMDFLA)
318 return ("RAIDMDF-LA");
319 if (qual == G_RAID_VOLUME_RLQ_RMDFLS)
320 return ("RAIDMDF-LS");
321 return ("RAIDMDF");
322 case G_RAID_VOLUME_RL_RAID1E:
323 if (qual == G_RAID_VOLUME_RLQ_R1EA)
324 return ("RAID1E-A");
325 if (qual == G_RAID_VOLUME_RLQ_R1EO)
326 return ("RAID1E-O");
327 return ("RAID1E");
328 case G_RAID_VOLUME_RL_SINGLE:
329 return ("SINGLE");
330 case G_RAID_VOLUME_RL_CONCAT:
331 return ("CONCAT");
332 case G_RAID_VOLUME_RL_RAID5E:
333 if (qual == G_RAID_VOLUME_RLQ_R5ERA)
334 return ("RAID5E-RA");
335 if (qual == G_RAID_VOLUME_RLQ_R5ERS)
336 return ("RAID5E-RS");
337 if (qual == G_RAID_VOLUME_RLQ_R5ELA)
338 return ("RAID5E-LA");
339 if (qual == G_RAID_VOLUME_RLQ_R5ELS)
340 return ("RAID5E-LS");
341 return ("RAID5E");
342 case G_RAID_VOLUME_RL_RAID5EE:
343 if (qual == G_RAID_VOLUME_RLQ_R5EERA)
344 return ("RAID5EE-RA");
345 if (qual == G_RAID_VOLUME_RLQ_R5EERS)
346 return ("RAID5EE-RS");
347 if (qual == G_RAID_VOLUME_RLQ_R5EELA)
348 return ("RAID5EE-LA");
349 if (qual == G_RAID_VOLUME_RLQ_R5EELS)
350 return ("RAID5EE-LS");
351 return ("RAID5EE");
352 case G_RAID_VOLUME_RL_RAID5R:
353 if (qual == G_RAID_VOLUME_RLQ_R5RRA)
354 return ("RAID5R-RA");
355 if (qual == G_RAID_VOLUME_RLQ_R5RRS)
356 return ("RAID5R-RS");
357 if (qual == G_RAID_VOLUME_RLQ_R5RLA)
358 return ("RAID5R-LA");
359 if (qual == G_RAID_VOLUME_RLQ_R5RLS)
360 return ("RAID5R-LS");
361 return ("RAID5E");
362 default:
363 return ("UNKNOWN");
364 }
365 }
366
367 int
368 g_raid_volume_str2level(const char *str, int *level, int *qual)
369 {
370
371 *level = G_RAID_VOLUME_RL_UNKNOWN;
372 *qual = G_RAID_VOLUME_RLQ_NONE;
373 if (strcasecmp(str, "RAID0") == 0)
374 *level = G_RAID_VOLUME_RL_RAID0;
375 else if (strcasecmp(str, "RAID1") == 0)
376 *level = G_RAID_VOLUME_RL_RAID1;
377 else if (strcasecmp(str, "RAID3-P0") == 0) {
378 *level = G_RAID_VOLUME_RL_RAID3;
379 *qual = G_RAID_VOLUME_RLQ_R3P0;
380 } else if (strcasecmp(str, "RAID3-PN") == 0 ||
381 strcasecmp(str, "RAID3") == 0) {
382 *level = G_RAID_VOLUME_RL_RAID3;
383 *qual = G_RAID_VOLUME_RLQ_R3PN;
384 } else if (strcasecmp(str, "RAID4-P0") == 0) {
385 *level = G_RAID_VOLUME_RL_RAID4;
386 *qual = G_RAID_VOLUME_RLQ_R4P0;
387 } else if (strcasecmp(str, "RAID4-PN") == 0 ||
388 strcasecmp(str, "RAID4") == 0) {
389 *level = G_RAID_VOLUME_RL_RAID4;
390 *qual = G_RAID_VOLUME_RLQ_R4PN;
391 } else if (strcasecmp(str, "RAID5-RA") == 0) {
392 *level = G_RAID_VOLUME_RL_RAID5;
393 *qual = G_RAID_VOLUME_RLQ_R5RA;
394 } else if (strcasecmp(str, "RAID5-RS") == 0) {
395 *level = G_RAID_VOLUME_RL_RAID5;
396 *qual = G_RAID_VOLUME_RLQ_R5RS;
397 } else if (strcasecmp(str, "RAID5") == 0 ||
398 strcasecmp(str, "RAID5-LA") == 0) {
399 *level = G_RAID_VOLUME_RL_RAID5;
400 *qual = G_RAID_VOLUME_RLQ_R5LA;
401 } else if (strcasecmp(str, "RAID5-LS") == 0) {
402 *level = G_RAID_VOLUME_RL_RAID5;
403 *qual = G_RAID_VOLUME_RLQ_R5LS;
404 } else if (strcasecmp(str, "RAID6-RA") == 0) {
405 *level = G_RAID_VOLUME_RL_RAID6;
406 *qual = G_RAID_VOLUME_RLQ_R6RA;
407 } else if (strcasecmp(str, "RAID6-RS") == 0) {
408 *level = G_RAID_VOLUME_RL_RAID6;
409 *qual = G_RAID_VOLUME_RLQ_R6RS;
410 } else if (strcasecmp(str, "RAID6") == 0 ||
411 strcasecmp(str, "RAID6-LA") == 0) {
412 *level = G_RAID_VOLUME_RL_RAID6;
413 *qual = G_RAID_VOLUME_RLQ_R6LA;
414 } else if (strcasecmp(str, "RAID6-LS") == 0) {
415 *level = G_RAID_VOLUME_RL_RAID6;
416 *qual = G_RAID_VOLUME_RLQ_R6LS;
417 } else if (strcasecmp(str, "RAIDMDF-RA") == 0) {
418 *level = G_RAID_VOLUME_RL_RAIDMDF;
419 *qual = G_RAID_VOLUME_RLQ_RMDFRA;
420 } else if (strcasecmp(str, "RAIDMDF-RS") == 0) {
421 *level = G_RAID_VOLUME_RL_RAIDMDF;
422 *qual = G_RAID_VOLUME_RLQ_RMDFRS;
423 } else if (strcasecmp(str, "RAIDMDF") == 0 ||
424 strcasecmp(str, "RAIDMDF-LA") == 0) {
425 *level = G_RAID_VOLUME_RL_RAIDMDF;
426 *qual = G_RAID_VOLUME_RLQ_RMDFLA;
427 } else if (strcasecmp(str, "RAIDMDF-LS") == 0) {
428 *level = G_RAID_VOLUME_RL_RAIDMDF;
429 *qual = G_RAID_VOLUME_RLQ_RMDFLS;
430 } else if (strcasecmp(str, "RAID10") == 0 ||
431 strcasecmp(str, "RAID1E") == 0 ||
432 strcasecmp(str, "RAID1E-A") == 0) {
433 *level = G_RAID_VOLUME_RL_RAID1E;
434 *qual = G_RAID_VOLUME_RLQ_R1EA;
435 } else if (strcasecmp(str, "RAID1E-O") == 0) {
436 *level = G_RAID_VOLUME_RL_RAID1E;
437 *qual = G_RAID_VOLUME_RLQ_R1EO;
438 } else if (strcasecmp(str, "SINGLE") == 0)
439 *level = G_RAID_VOLUME_RL_SINGLE;
440 else if (strcasecmp(str, "CONCAT") == 0)
441 *level = G_RAID_VOLUME_RL_CONCAT;
442 else if (strcasecmp(str, "RAID5E-RA") == 0) {
443 *level = G_RAID_VOLUME_RL_RAID5E;
444 *qual = G_RAID_VOLUME_RLQ_R5ERA;
445 } else if (strcasecmp(str, "RAID5E-RS") == 0) {
446 *level = G_RAID_VOLUME_RL_RAID5E;
447 *qual = G_RAID_VOLUME_RLQ_R5ERS;
448 } else if (strcasecmp(str, "RAID5E") == 0 ||
449 strcasecmp(str, "RAID5E-LA") == 0) {
450 *level = G_RAID_VOLUME_RL_RAID5E;
451 *qual = G_RAID_VOLUME_RLQ_R5ELA;
452 } else if (strcasecmp(str, "RAID5E-LS") == 0) {
453 *level = G_RAID_VOLUME_RL_RAID5E;
454 *qual = G_RAID_VOLUME_RLQ_R5ELS;
455 } else if (strcasecmp(str, "RAID5EE-RA") == 0) {
456 *level = G_RAID_VOLUME_RL_RAID5EE;
457 *qual = G_RAID_VOLUME_RLQ_R5EERA;
458 } else if (strcasecmp(str, "RAID5EE-RS") == 0) {
459 *level = G_RAID_VOLUME_RL_RAID5EE;
460 *qual = G_RAID_VOLUME_RLQ_R5EERS;
461 } else if (strcasecmp(str, "RAID5EE") == 0 ||
462 strcasecmp(str, "RAID5EE-LA") == 0) {
463 *level = G_RAID_VOLUME_RL_RAID5EE;
464 *qual = G_RAID_VOLUME_RLQ_R5EELA;
465 } else if (strcasecmp(str, "RAID5EE-LS") == 0) {
466 *level = G_RAID_VOLUME_RL_RAID5EE;
467 *qual = G_RAID_VOLUME_RLQ_R5EELS;
468 } else if (strcasecmp(str, "RAID5R-RA") == 0) {
469 *level = G_RAID_VOLUME_RL_RAID5R;
470 *qual = G_RAID_VOLUME_RLQ_R5RRA;
471 } else if (strcasecmp(str, "RAID5R-RS") == 0) {
472 *level = G_RAID_VOLUME_RL_RAID5R;
473 *qual = G_RAID_VOLUME_RLQ_R5RRS;
474 } else if (strcasecmp(str, "RAID5R") == 0 ||
475 strcasecmp(str, "RAID5R-LA") == 0) {
476 *level = G_RAID_VOLUME_RL_RAID5R;
477 *qual = G_RAID_VOLUME_RLQ_R5RLA;
478 } else if (strcasecmp(str, "RAID5R-LS") == 0) {
479 *level = G_RAID_VOLUME_RL_RAID5R;
480 *qual = G_RAID_VOLUME_RLQ_R5RLS;
481 } else
482 return (-1);
483 return (0);
484 }
485
486 const char *
487 g_raid_get_diskname(struct g_raid_disk *disk)
488 {
489
490 if (disk->d_consumer == NULL || disk->d_consumer->provider == NULL)
491 return ("[unknown]");
492 return (disk->d_consumer->provider->name);
493 }
494
495 void
496 g_raid_get_disk_info(struct g_raid_disk *disk)
497 {
498 struct g_consumer *cp = disk->d_consumer;
499 int error, len;
500
501 /* Read kernel dumping information. */
502 disk->d_kd.offset = 0;
503 disk->d_kd.length = OFF_MAX;
504 len = sizeof(disk->d_kd);
505 error = g_io_getattr("GEOM::kerneldump", cp, &len, &disk->d_kd);
506 if (error)
507 disk->d_kd.di.dumper = NULL;
508 if (disk->d_kd.di.dumper == NULL)
509 G_RAID_DEBUG1(2, disk->d_softc,
510 "Dumping not supported by %s: %d.",
511 cp->provider->name, error);
512
513 /* Read BIO_DELETE support. */
514 error = g_getattr("GEOM::candelete", cp, &disk->d_candelete);
515 if (error)
516 disk->d_candelete = 0;
517 if (!disk->d_candelete)
518 G_RAID_DEBUG1(2, disk->d_softc,
519 "BIO_DELETE not supported by %s: %d.",
520 cp->provider->name, error);
521 }
522
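/*
 * Report the aggregate state of a disk to lower GEOM layers via the
 * GEOM::setstate attribute (consumed, for example, by enclosure LED
 * drivers).  The reported state is the worst state found among the
 * disk's subdisks.
 */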
523 void
524 g_raid_report_disk_state(struct g_raid_disk *disk)
525 {
526 struct g_raid_subdisk *sd;
527 int len, state;
528 uint32_t s;
529
530 if (disk->d_consumer == NULL)
531 return;
532 if (disk->d_state == G_RAID_DISK_S_DISABLED) {
533 s = G_STATE_ACTIVE; /* XXX */
534 } else if (disk->d_state == G_RAID_DISK_S_FAILED ||
535 disk->d_state == G_RAID_DISK_S_STALE_FAILED) {
536 s = G_STATE_FAILED;
537 } else {
538 state = G_RAID_SUBDISK_S_ACTIVE;
539 TAILQ_FOREACH(sd, &disk->d_subdisks, sd_next) {
540 if (sd->sd_state < state)
541 state = sd->sd_state;
542 }
543 if (state == G_RAID_SUBDISK_S_FAILED)
544 s = G_STATE_FAILED;
545 else if (state == G_RAID_SUBDISK_S_NEW ||
546 state == G_RAID_SUBDISK_S_REBUILD)
547 s = G_STATE_REBUILD;
548 else if (state == G_RAID_SUBDISK_S_STALE ||
549 state == G_RAID_SUBDISK_S_RESYNC)
550 s = G_STATE_RESYNC;
551 else
552 s = G_STATE_ACTIVE;
553 }
554 len = sizeof(s);
555 g_io_getattr("GEOM::setstate", disk->d_consumer, &len, &s);
556 G_RAID_DEBUG1(2, disk->d_softc, "Disk %s state reported as %d.",
557 g_raid_get_diskname(disk), s);
558 }
559
560 void
561 g_raid_change_disk_state(struct g_raid_disk *disk, int state)
562 {
563
564 G_RAID_DEBUG1(0, disk->d_softc, "Disk %s state changed from %s to %s.",
565 g_raid_get_diskname(disk),
566 g_raid_disk_state2str(disk->d_state),
567 g_raid_disk_state2str(state));
568 disk->d_state = state;
569 g_raid_report_disk_state(disk);
570 }
571
572 void
573 g_raid_change_subdisk_state(struct g_raid_subdisk *sd, int state)
574 {
575
576 G_RAID_DEBUG1(0, sd->sd_softc,
577 "Subdisk %s:%d-%s state changed from %s to %s.",
578 sd->sd_volume->v_name, sd->sd_pos,
579 sd->sd_disk ? g_raid_get_diskname(sd->sd_disk) : "[none]",
580 g_raid_subdisk_state2str(sd->sd_state),
581 g_raid_subdisk_state2str(state));
582 sd->sd_state = state;
583 if (sd->sd_disk)
584 g_raid_report_disk_state(sd->sd_disk);
585 }
586
587 void
588 g_raid_change_volume_state(struct g_raid_volume *vol, int state)
589 {
590
591 G_RAID_DEBUG1(0, vol->v_softc,
592 "Volume %s state changed from %s to %s.",
593 vol->v_name,
594 g_raid_volume_state2str(vol->v_state),
595 g_raid_volume_state2str(state));
596 vol->v_state = state;
597 }
598
599 /*
600 * --- Events handling functions ---
601 * Events in geom_raid are used to update subdisk and volume status
602 * from a single thread, which simplifies locking.
603 */
604 static void
605 g_raid_event_free(struct g_raid_event *ep)
606 {
607
608 free(ep, M_RAID);
609 }
610
611 int
612 g_raid_event_send(void *arg, int event, int flags)
613 {
614 struct g_raid_softc *sc;
615 struct g_raid_event *ep;
616 int error;
617
618 if ((flags & G_RAID_EVENT_VOLUME) != 0) {
619 sc = ((struct g_raid_volume *)arg)->v_softc;
620 } else if ((flags & G_RAID_EVENT_DISK) != 0) {
621 sc = ((struct g_raid_disk *)arg)->d_softc;
622 } else if ((flags & G_RAID_EVENT_SUBDISK) != 0) {
623 sc = ((struct g_raid_subdisk *)arg)->sd_softc;
624 } else {
625 sc = arg;
626 }
627 ep = malloc(sizeof(*ep), M_RAID,
628 sx_xlocked(&sc->sc_lock) ? M_WAITOK : M_NOWAIT);
629 if (ep == NULL)
630 return (ENOMEM);
631 ep->e_tgt = arg;
632 ep->e_event = event;
633 ep->e_flags = flags;
634 ep->e_error = 0;
635 G_RAID_DEBUG1(4, sc, "Sending event %p. Waking up %p.", ep, sc);
636 mtx_lock(&sc->sc_queue_mtx);
637 TAILQ_INSERT_TAIL(&sc->sc_events, ep, e_next);
638 mtx_unlock(&sc->sc_queue_mtx);
639 wakeup(sc);
640
641 if ((flags & G_RAID_EVENT_WAIT) == 0)
642 return (0);
643
644 sx_assert(&sc->sc_lock, SX_XLOCKED);
645 G_RAID_DEBUG1(4, sc, "Sleeping on %p.", ep);
646 sx_xunlock(&sc->sc_lock);
647 while ((ep->e_flags & G_RAID_EVENT_DONE) == 0) {
648 mtx_lock(&sc->sc_queue_mtx);
649 MSLEEP(error, ep, &sc->sc_queue_mtx, PRIBIO | PDROP, "m:event",
650 hz * 5);
651 }
652 error = ep->e_error;
653 g_raid_event_free(ep);
654 sx_xlock(&sc->sc_lock);
655 return (error);
656 }
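/*
 * Usage sketch (hypothetical caller): a metadata module that wants to
 * synchronously disconnect a disk would post a blocking event while
 * holding the node lock exclusively, since the sender drops and
 * reacquires sc_lock around the sleep.  ECANCELED indicates the event
 * was cancelled before the worker could handle it:
 *
 *	sx_xlock(&sc->sc_lock);
 *	error = g_raid_event_send(disk, G_RAID_DISK_E_DISCONNECTED,
 *	    G_RAID_EVENT_DISK | G_RAID_EVENT_WAIT);
 *	sx_xunlock(&sc->sc_lock);
 */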
657
658 static void
659 g_raid_event_cancel(struct g_raid_softc *sc, void *tgt)
660 {
661 struct g_raid_event *ep, *tmpep;
662
663 sx_assert(&sc->sc_lock, SX_XLOCKED);
664
665 mtx_lock(&sc->sc_queue_mtx);
666 TAILQ_FOREACH_SAFE(ep, &sc->sc_events, e_next, tmpep) {
667 if (ep->e_tgt != tgt)
668 continue;
669 TAILQ_REMOVE(&sc->sc_events, ep, e_next);
670 if ((ep->e_flags & G_RAID_EVENT_WAIT) == 0)
671 g_raid_event_free(ep);
672 else {
673 ep->e_error = ECANCELED;
674 wakeup(ep);
675 }
676 }
677 mtx_unlock(&sc->sc_queue_mtx);
678 }
679
680 static int
681 g_raid_event_check(struct g_raid_softc *sc, void *tgt)
682 {
683 struct g_raid_event *ep;
684 int res = 0;
685
686 sx_assert(&sc->sc_lock, SX_XLOCKED);
687
688 mtx_lock(&sc->sc_queue_mtx);
689 TAILQ_FOREACH(ep, &sc->sc_events, e_next) {
690 if (ep->e_tgt != tgt)
691 continue;
692 res = 1;
693 break;
694 }
695 mtx_unlock(&sc->sc_queue_mtx);
696 return (res);
697 }
698
699 /*
700 * Return the number of disks in a given state.
701 * If state is equal to -1, count all connected disks.
702 */
703 u_int
704 g_raid_ndisks(struct g_raid_softc *sc, int state)
705 {
706 struct g_raid_disk *disk;
707 u_int n;
708
709 sx_assert(&sc->sc_lock, SX_LOCKED);
710
711 n = 0;
712 TAILQ_FOREACH(disk, &sc->sc_disks, d_next) {
713 if (disk->d_state == state || state == -1)
714 n++;
715 }
716 return (n);
717 }
718
719 /*
720 * Return the number of subdisks in a given state.
721 * If state is equal to -1, count all connected subdisks.
722 */
723 u_int
724 g_raid_nsubdisks(struct g_raid_volume *vol, int state)
725 {
726 struct g_raid_subdisk *subdisk;
727 struct g_raid_softc *sc;
728 u_int i, n;
729
730 sc = vol->v_softc;
731 sx_assert(&sc->sc_lock, SX_LOCKED);
732
733 n = 0;
734 for (i = 0; i < vol->v_disks_count; i++) {
735 subdisk = &vol->v_subdisks[i];
736 if ((state == -1 &&
737 subdisk->sd_state != G_RAID_SUBDISK_S_NONE) ||
738 subdisk->sd_state == state)
739 n++;
740 }
741 return (n);
742 }
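/*
 * Illustrative use of the counters above (the actual policy lives in
 * the metadata and transformation modules): a mirror module could
 * derive the volume state from the number of healthy subdisks, e.g.
 *
 *	na = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_ACTIVE);
 *	if (na == vol->v_disks_count)
 *		g_raid_change_volume_state(vol, G_RAID_VOLUME_S_OPTIMAL);
 *	else if (na > 0)
 *		g_raid_change_volume_state(vol, G_RAID_VOLUME_S_DEGRADED);
 *	else
 *		g_raid_change_volume_state(vol, G_RAID_VOLUME_S_BROKEN);
 */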
743
744 /*
745 * Return the first subdisk in a given state.
746 * If state is equal to -1, return the first connected subdisk.
747 */
748 struct g_raid_subdisk *
749 g_raid_get_subdisk(struct g_raid_volume *vol, int state)
750 {
751 struct g_raid_subdisk *sd;
752 struct g_raid_softc *sc;
753 u_int i;
754
755 sc = vol->v_softc;
756 sx_assert(&sc->sc_lock, SX_LOCKED);
757
758 for (i = 0; i < vol->v_disks_count; i++) {
759 sd = &vol->v_subdisks[i];
760 if ((state == -1 &&
761 sd->sd_state != G_RAID_SUBDISK_S_NONE) ||
762 sd->sd_state == state)
763 return (sd);
764 }
765 return (NULL);
766 }
767
768 struct g_consumer *
769 g_raid_open_consumer(struct g_raid_softc *sc, const char *name)
770 {
771 struct g_consumer *cp;
772 struct g_provider *pp;
773
774 g_topology_assert();
775
776 if (strncmp(name, "/dev/", 5) == 0)
777 name += 5;
778 pp = g_provider_by_name(name);
779 if (pp == NULL)
780 return (NULL);
781 cp = g_new_consumer(sc->sc_geom);
782 cp->flags |= G_CF_DIRECT_RECEIVE;
783 if (g_attach(cp, pp) != 0) {
784 g_destroy_consumer(cp);
785 return (NULL);
786 }
787 if (g_access(cp, 1, 1, 1) != 0) {
788 g_detach(cp);
789 g_destroy_consumer(cp);
790 return (NULL);
791 }
792 return (cp);
793 }
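/*
 * Usage sketch (hypothetical caller): metadata modules use this under
 * the topology lock to attach a component by name; the consumer comes
 * back opened r1w1e1, or NULL on any failure:
 *
 *	g_topology_lock();
 *	cp = g_raid_open_consumer(sc, "/dev/ada1");
 *	g_topology_unlock();
 *	if (cp == NULL)
 *		return (ENXIO);
 */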
794
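/* Count queued bios that originate from the given consumer. */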
795 static u_int
796 g_raid_nrequests(struct g_raid_softc *sc, struct g_consumer *cp)
797 {
798 struct bio *bp;
799 u_int nreqs = 0;
800
801 mtx_lock(&sc->sc_queue_mtx);
802 TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
803 if (bp->bio_from == cp)
804 nreqs++;
805 }
806 mtx_unlock(&sc->sc_queue_mtx);
807 return (nreqs);
808 }
809
810 u_int
811 g_raid_nopens(struct g_raid_softc *sc)
812 {
813 struct g_raid_volume *vol;
814 u_int opens;
815
816 opens = 0;
817 TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) {
818 if (vol->v_provider_open != 0)
819 opens++;
820 }
821 return (opens);
822 }
823
824 static int
825 g_raid_consumer_is_busy(struct g_raid_softc *sc, struct g_consumer *cp)
826 {
827
828 if (cp->index > 0) {
829 G_RAID_DEBUG1(2, sc,
830 "I/O requests for %s exist, can't destroy it now.",
831 cp->provider->name);
832 return (1);
833 }
834 if (g_raid_nrequests(sc, cp) > 0) {
835 G_RAID_DEBUG1(2, sc,
836 "I/O requests for %s in queue, can't destroy it now.",
837 cp->provider->name);
838 return (1);
839 }
840 return (0);
841 }
842
843 static void
844 g_raid_destroy_consumer(void *arg, int flags __unused)
845 {
846 struct g_consumer *cp;
847
848 g_topology_assert();
849
850 cp = arg;
851 G_RAID_DEBUG(1, "Consumer %s destroyed.", cp->provider->name);
852 g_detach(cp);
853 g_destroy_consumer(cp);
854 }
855
856 void
857 g_raid_kill_consumer(struct g_raid_softc *sc, struct g_consumer *cp)
858 {
859 struct g_provider *pp;
860 int retaste_wait;
861
862 g_topology_assert_not();
863
864 g_topology_lock();
865 cp->private = NULL;
866 if (g_raid_consumer_is_busy(sc, cp))
867 goto out;
868 pp = cp->provider;
869 retaste_wait = 0;
870 if (cp->acw == 1) {
871 if ((pp->geom->flags & G_GEOM_WITHER) == 0)
872 retaste_wait = 1;
873 }
874 if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
875 g_access(cp, -cp->acr, -cp->acw, -cp->ace);
876 if (retaste_wait) {
877 /*
878 * After the retaste event was sent (inside g_access()), we can send
879 * an event to detach and destroy the consumer.
880 * A class that has a consumer attached to the given provider
881 * will not receive a retaste event for that provider.
882 * This is how retaste events are ignored when closing
883 * consumers opened for write: the consumer is detached and
884 * destroyed after the retaste event is sent.
885 */
886 g_post_event(g_raid_destroy_consumer, cp, M_WAITOK, NULL);
887 goto out;
888 }
889 G_RAID_DEBUG(1, "Consumer %s destroyed.", pp->name);
890 g_detach(cp);
891 g_destroy_consumer(cp);
892 out:
893 g_topology_unlock();
894 }
895
896 static void
897 g_raid_orphan(struct g_consumer *cp)
898 {
899 struct g_raid_disk *disk;
900
901 g_topology_assert();
902
903 disk = cp->private;
904 if (disk == NULL)
905 return;
906 g_raid_event_send(disk, G_RAID_DISK_E_DISCONNECTED,
907 G_RAID_EVENT_DISK);
908 }
909
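/*
 * Mark the volume clean in metadata once it has seen no writes for
 * g_raid_clean_time seconds (immediately on shutdown or final close).
 */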
910 static void
911 g_raid_clean(struct g_raid_volume *vol, int acw)
912 {
913 struct g_raid_softc *sc;
914 int timeout;
915
916 sc = vol->v_softc;
917 g_topology_assert_not();
918 sx_assert(&sc->sc_lock, SX_XLOCKED);
919
920 // if ((sc->sc_flags & G_RAID_DEVICE_FLAG_NOFAILSYNC) != 0)
921 // return;
922 if (!vol->v_dirty)
923 return;
924 if (vol->v_writes > 0)
925 return;
926 if (acw > 0 || (acw == -1 &&
927 vol->v_provider != NULL && vol->v_provider->acw > 0)) {
928 timeout = g_raid_clean_time - (time_uptime - vol->v_last_write);
929 if (!g_raid_shutdown && timeout > 0)
930 return;
931 }
932 vol->v_dirty = 0;
933 G_RAID_DEBUG1(1, sc, "Volume %s marked as clean.",
934 vol->v_name);
935 g_raid_write_metadata(sc, vol, NULL, NULL);
936 }
937
938 static void
939 g_raid_dirty(struct g_raid_volume *vol)
940 {
941 struct g_raid_softc *sc;
942
943 sc = vol->v_softc;
944 g_topology_assert_not();
945 sx_assert(&sc->sc_lock, SX_XLOCKED);
946
947 // if ((sc->sc_flags & G_RAID_DEVICE_FLAG_NOFAILSYNC) != 0)
948 // return;
949 vol->v_dirty = 1;
950 G_RAID_DEBUG1(1, sc, "Volume %s marked as dirty.",
951 vol->v_name);
952 g_raid_write_metadata(sc, vol, NULL, NULL);
953 }
954
955 void
956 g_raid_tr_flush_common(struct g_raid_tr_object *tr, struct bio *bp)
957 {
958 struct g_raid_volume *vol;
959 struct g_raid_subdisk *sd;
960 struct bio_queue_head queue;
961 struct bio *cbp;
962 int i;
963
964 vol = tr->tro_volume;
965
966 /*
967 * Allocate all bios before sending any request, so we can return
968 * ENOMEM in a nice and clean way.
969 */
970 bioq_init(&queue);
971 for (i = 0; i < vol->v_disks_count; i++) {
972 sd = &vol->v_subdisks[i];
973 if (sd->sd_state == G_RAID_SUBDISK_S_NONE ||
974 sd->sd_state == G_RAID_SUBDISK_S_FAILED)
975 continue;
976 cbp = g_clone_bio(bp);
977 if (cbp == NULL)
978 goto failure;
979 cbp->bio_caller1 = sd;
980 bioq_insert_tail(&queue, cbp);
981 }
982 while ((cbp = bioq_takefirst(&queue)) != NULL) {
983 sd = cbp->bio_caller1;
984 cbp->bio_caller1 = NULL;
985 g_raid_subdisk_iostart(sd, cbp);
986 }
987 return;
988 failure:
989 while ((cbp = bioq_takefirst(&queue)) != NULL)
990 g_destroy_bio(cbp);
991 if (bp->bio_error == 0)
992 bp->bio_error = ENOMEM;
993 g_raid_iodone(bp, bp->bio_error);
994 }
995
996 static void
997 g_raid_tr_kerneldump_common_done(struct bio *bp)
998 {
999
1000 bp->bio_flags |= BIO_DONE;
1001 }
1002
1003 int
1004 g_raid_tr_kerneldump_common(struct g_raid_tr_object *tr,
1005 void *virtual, vm_offset_t physical, off_t offset, size_t length)
1006 {
1007 struct g_raid_softc *sc;
1008 struct g_raid_volume *vol;
1009 struct bio bp;
1010
1011 vol = tr->tro_volume;
1012 sc = vol->v_softc;
1013
1014 g_reset_bio(&bp);
1015 bp.bio_cmd = BIO_WRITE;
1016 bp.bio_done = g_raid_tr_kerneldump_common_done;
1017 bp.bio_attribute = NULL;
1018 bp.bio_offset = offset;
1019 bp.bio_length = length;
1020 bp.bio_data = virtual;
1021 bp.bio_to = vol->v_provider;
1022
1023 g_raid_start(&bp);
1024 while (!(bp.bio_flags & BIO_DONE)) {
1025 G_RAID_DEBUG1(4, sc, "Poll...");
1026 g_raid_poll(sc);
1027 DELAY(10);
1028 }
1029
1030 return (bp.bio_error != 0 ? EIO : 0);
1031 }
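/*
 * Note on the loop above: while dumping, no other threads run, so the
 * worker cannot complete the request; instead the queues are drained
 * synchronously with g_raid_poll() until BIO_DONE appears.
 */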
1032
1033 static int
1034 g_raid_dump(void *arg,
1035 void *virtual, vm_offset_t physical, off_t offset, size_t length)
1036 {
1037 struct g_raid_volume *vol;
1038 int error;
1039
1040 vol = (struct g_raid_volume *)arg;
1041 G_RAID_DEBUG1(3, vol->v_softc, "Dumping at off %llu len %llu.",
1042 (long long unsigned)offset, (long long unsigned)length);
1043
1044 error = G_RAID_TR_KERNELDUMP(vol->v_tr,
1045 virtual, physical, offset, length);
1046 return (error);
1047 }
1048
1049 static void
1050 g_raid_kerneldump(struct g_raid_softc *sc, struct bio *bp)
1051 {
1052 struct g_kerneldump *gkd;
1053 struct g_provider *pp;
1054 struct g_raid_volume *vol;
1055
1056 gkd = (struct g_kerneldump*)bp->bio_data;
1057 pp = bp->bio_to;
1058 vol = pp->private;
1059 g_trace(G_T_TOPOLOGY, "g_raid_kerneldump(%s, %jd, %jd)",
1060 pp->name, (intmax_t)gkd->offset, (intmax_t)gkd->length);
1061 gkd->di.dumper = g_raid_dump;
1062 gkd->di.priv = vol;
1063 gkd->di.blocksize = vol->v_sectorsize;
1064 gkd->di.maxiosize = DFLTPHYS;
1065 gkd->di.mediaoffset = gkd->offset;
1066 if ((gkd->offset + gkd->length) > vol->v_mediasize)
1067 gkd->length = vol->v_mediasize - gkd->offset;
1068 gkd->di.mediasize = gkd->length;
1069 g_io_deliver(bp, 0);
1070 }
1071
1072 static void
1073 g_raid_candelete(struct g_raid_softc *sc, struct bio *bp)
1074 {
1075 struct g_provider *pp;
1076 struct g_raid_volume *vol;
1077 struct g_raid_subdisk *sd;
1078 int *val;
1079 int i;
1080
1081 val = (int *)bp->bio_data;
1082 pp = bp->bio_to;
1083 vol = pp->private;
1084 *val = 0;
1085 for (i = 0; i < vol->v_disks_count; i++) {
1086 sd = &vol->v_subdisks[i];
1087 if (sd->sd_state == G_RAID_SUBDISK_S_NONE)
1088 continue;
1089 if (sd->sd_disk->d_candelete) {
1090 *val = 1;
1091 break;
1092 }
1093 }
1094 g_io_deliver(bp, 0);
1095 }
1096
1097 static void
1098 g_raid_start(struct bio *bp)
1099 {
1100 struct g_raid_softc *sc;
1101
1102 sc = bp->bio_to->geom->softc;
1103 /*
1104 * If sc == NULL or there are no valid disks, provider's error
1105 * should be set and g_raid_start() should not be called at all.
1106 */
1107 // KASSERT(sc != NULL && sc->sc_state == G_RAID_VOLUME_S_RUNNING,
1108 // ("Provider's error should be set (error=%d)(mirror=%s).",
1109 // bp->bio_to->error, bp->bio_to->name));
1110 G_RAID_LOGREQ(3, bp, "Request received.");
1111
1112 switch (bp->bio_cmd) {
1113 case BIO_READ:
1114 case BIO_WRITE:
1115 case BIO_DELETE:
1116 case BIO_FLUSH:
1117 break;
1118 case BIO_GETATTR:
1119 if (!strcmp(bp->bio_attribute, "GEOM::candelete"))
1120 g_raid_candelete(sc, bp);
1121 else if (!strcmp(bp->bio_attribute, "GEOM::kerneldump"))
1122 g_raid_kerneldump(sc, bp);
1123 else
1124 g_io_deliver(bp, EOPNOTSUPP);
1125 return;
1126 default:
1127 g_io_deliver(bp, EOPNOTSUPP);
1128 return;
1129 }
1130 mtx_lock(&sc->sc_queue_mtx);
1131 bioq_insert_tail(&sc->sc_queue, bp);
1132 mtx_unlock(&sc->sc_queue_mtx);
1133 if (!dumping) {
1134 G_RAID_DEBUG1(4, sc, "Waking up %p.", sc);
1135 wakeup(sc);
1136 }
1137 }
1138
1139 static int
1140 g_raid_bio_overlaps(const struct bio *bp, off_t lstart, off_t len)
1141 {
1142 /*
1143 * 5 cases:
1144 * (1) bp entirely below NO
1145 * (2) bp entirely above NO
1146 * (3) bp start below, but end in range YES
1147 * (4) bp entirely within YES
1148 * (5) bp starts within, ends above YES
1149 *
1150 * lock range 10-19 (offset 10 length 10)
1151 * (1) 1-5: first if kicks it out
1152 * (2) 30-35: second if kicks it out
1153 * (3) 5-15: passes both ifs
1154 * (4) 12-14: passes both ifs
1155 * (5) 19-20: passes both
1156 */
1157 off_t lend = lstart + len - 1;
1158 off_t bstart = bp->bio_offset;
1159 off_t bend = bp->bio_offset + bp->bio_length - 1;
1160
1161 if (bend < lstart)
1162 return (0);
1163 if (lend < bstart)
1164 return (0);
1165 return (1);
1166 }
1167
1168 static int
1169 g_raid_is_in_locked_range(struct g_raid_volume *vol, const struct bio *bp)
1170 {
1171 struct g_raid_lock *lp;
1172
1173 sx_assert(&vol->v_softc->sc_lock, SX_LOCKED);
1174
1175 LIST_FOREACH(lp, &vol->v_locks, l_next) {
1176 if (g_raid_bio_overlaps(bp, lp->l_offset, lp->l_length))
1177 return (1);
1178 }
1179 return (0);
1180 }
1181
1182 static void
1183 g_raid_start_request(struct bio *bp)
1184 {
1185 struct g_raid_softc *sc;
1186 struct g_raid_volume *vol;
1187
1188 sc = bp->bio_to->geom->softc;
1189 sx_assert(&sc->sc_lock, SX_LOCKED);
1190 vol = bp->bio_to->private;
1191
1192 /*
1193 * Check to see if this item is in a locked range. If so,
1194 * queue it to our locked queue and return. We'll requeue
1195 * it when the range is unlocked. Internal I/O for the
1196 * rebuild/rescan/recovery process is excluded from this
1197 * check so we can actually do the recovery.
1198 */
1199 if (!(bp->bio_cflags & G_RAID_BIO_FLAG_SPECIAL) &&
1200 g_raid_is_in_locked_range(vol, bp)) {
1201 G_RAID_LOGREQ(3, bp, "Defer request.");
1202 bioq_insert_tail(&vol->v_locked, bp);
1203 return;
1204 }
1205
1206 /*
1207 * If we're actually going to do the write/delete, then
1208 * update the idle stats for the volume.
1209 */
1210 if (bp->bio_cmd == BIO_WRITE || bp->bio_cmd == BIO_DELETE) {
1211 if (!vol->v_dirty)
1212 g_raid_dirty(vol);
1213 vol->v_writes++;
1214 }
1215
1216 /*
1217 * Put the request onto the in-flight queue so that new
1218 * synchronization requests can be checked against it. Then tell
1219 * the transformation layer to start the I/O.
1220 */
1221 bioq_insert_tail(&vol->v_inflight, bp);
1222 G_RAID_LOGREQ(4, bp, "Request started");
1223 G_RAID_TR_IOSTART(vol->v_tr, bp);
1224 }
1225
1226 static void
1227 g_raid_finish_with_locked_ranges(struct g_raid_volume *vol, struct bio *bp)
1228 {
1229 off_t off, len;
1230 struct bio *nbp;
1231 struct g_raid_lock *lp;
1232
1233 vol->v_pending_lock = 0;
1234 LIST_FOREACH(lp, &vol->v_locks, l_next) {
1235 if (lp->l_pending) {
1236 off = lp->l_offset;
1237 len = lp->l_length;
1238 lp->l_pending = 0;
1239 TAILQ_FOREACH(nbp, &vol->v_inflight.queue, bio_queue) {
1240 if (g_raid_bio_overlaps(nbp, off, len))
1241 lp->l_pending++;
1242 }
1243 if (lp->l_pending) {
1244 vol->v_pending_lock = 1;
1245 G_RAID_DEBUG1(4, vol->v_softc,
1246 "Deferred lock(%jd, %jd) has %d pending",
1247 (intmax_t)off, (intmax_t)(off + len),
1248 lp->l_pending);
1249 continue;
1250 }
1251 G_RAID_DEBUG1(4, vol->v_softc,
1252 "Deferred lock of %jd to %jd completed",
1253 (intmax_t)off, (intmax_t)(off + len));
1254 G_RAID_TR_LOCKED(vol->v_tr, lp->l_callback_arg);
1255 }
1256 }
1257 }
1258
1259 void
1260 g_raid_iodone(struct bio *bp, int error)
1261 {
1262 struct g_raid_softc *sc;
1263 struct g_raid_volume *vol;
1264
1265 sc = bp->bio_to->geom->softc;
1266 sx_assert(&sc->sc_lock, SX_LOCKED);
1267 vol = bp->bio_to->private;
1268 G_RAID_LOGREQ(3, bp, "Request done: %d.", error);
1269
1270 /* Update stats if we just did a write/delete. */
1271 if (bp->bio_cmd == BIO_WRITE || bp->bio_cmd == BIO_DELETE) {
1272 vol->v_writes--;
1273 vol->v_last_write = time_uptime;
1274 }
1275
1276 bioq_remove(&vol->v_inflight, bp);
1277 if (vol->v_pending_lock && g_raid_is_in_locked_range(vol, bp))
1278 g_raid_finish_with_locked_ranges(vol, bp);
1279 getmicrouptime(&vol->v_last_done);
1280 g_io_deliver(bp, error);
1281 }
1282
1283 int
1284 g_raid_lock_range(struct g_raid_volume *vol, off_t off, off_t len,
1285 struct bio *ignore, void *argp)
1286 {
1287 struct g_raid_softc *sc;
1288 struct g_raid_lock *lp;
1289 struct bio *bp;
1290
1291 sc = vol->v_softc;
1292 lp = malloc(sizeof(*lp), M_RAID, M_WAITOK | M_ZERO);
1293 LIST_INSERT_HEAD(&vol->v_locks, lp, l_next);
1294 lp->l_offset = off;
1295 lp->l_length = len;
1296 lp->l_callback_arg = argp;
1297
1298 lp->l_pending = 0;
1299 TAILQ_FOREACH(bp, &vol->v_inflight.queue, bio_queue) {
1300 if (bp != ignore && g_raid_bio_overlaps(bp, off, len))
1301 lp->l_pending++;
1302 }
1303
1304 /*
1305 * If any in-flight requests overlap the range, we return EBUSY; the
1306 * caller must then wait until those overlapping requests drain.
1307 */
1308 if (lp->l_pending > 0) {
1309 vol->v_pending_lock = 1;
1310 G_RAID_DEBUG1(4, sc, "Locking range %jd to %jd deferred %d pend",
1311 (intmax_t)off, (intmax_t)(off+len), lp->l_pending);
1312 return (EBUSY);
1313 }
1314 G_RAID_DEBUG1(4, sc, "Locking range %jd to %jd",
1315 (intmax_t)off, (intmax_t)(off+len));
1316 G_RAID_TR_LOCKED(vol->v_tr, lp->l_callback_arg);
1317 return (0);
1318 }
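/*
 * Usage sketch (hypothetical rebuild path, names illustrative): a
 * transformation module locks the window it is about to reconstruct,
 * copies it once the LOCKED callback fires, and then unlocks it.  A
 * zero return means the callback was invoked synchronously:
 *
 *	if (g_raid_lock_range(vol, offset, size, NULL, sd) == 0) {
 *		... G_RAID_TR_LOCKED() already ran; do the copy ...
 *		g_raid_unlock_range(vol, offset, size);
 *	}
 */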
1319
1320 int
1321 g_raid_unlock_range(struct g_raid_volume *vol, off_t off, off_t len)
1322 {
1323 struct g_raid_lock *lp;
1324 struct g_raid_softc *sc;
1325 struct bio *bp;
1326
1327 sc = vol->v_softc;
1328 LIST_FOREACH(lp, &vol->v_locks, l_next) {
1329 if (lp->l_offset == off && lp->l_length == len) {
1330 LIST_REMOVE(lp, l_next);
1331 /* XXX
1332 * Right now we just put them all back on the queue
1333 * and hope for the best. This is safe because any bio
1334 * still touching a locked range will land right back
1335 * on this list when the worker thread runs it.
1336 * XXX
1337 */
1338 G_RAID_DEBUG1(4, sc, "Unlocked %jd to %jd",
1339 (intmax_t)lp->l_offset,
1340 (intmax_t)(lp->l_offset+lp->l_length));
1341 mtx_lock(&sc->sc_queue_mtx);
1342 while ((bp = bioq_takefirst(&vol->v_locked)) != NULL)
1343 bioq_insert_tail(&sc->sc_queue, bp);
1344 mtx_unlock(&sc->sc_queue_mtx);
1345 free(lp, M_RAID);
1346 return (0);
1347 }
1348 }
1349 return (EINVAL);
1350 }
1351
1352 void
1353 g_raid_subdisk_iostart(struct g_raid_subdisk *sd, struct bio *bp)
1354 {
1355 struct g_consumer *cp;
1356 struct g_raid_disk *disk, *tdisk;
1357
1358 bp->bio_caller1 = sd;
1359
1360 /*
1361 * Make sure that the disk is present. Generally it is the job of
1362 * transformation layers not to send requests to absent disks, but
1363 * it is better to be safe than sorry and report the situation.
1364 */
1365 if (sd->sd_disk == NULL) {
1366 G_RAID_LOGREQ(0, bp, "Warning! I/O request to an absent disk!");
1367 nodisk:
1368 bp->bio_from = NULL;
1369 bp->bio_to = NULL;
1370 bp->bio_error = ENXIO;
1371 g_raid_disk_done(bp);
1372 return;
1373 }
1374 disk = sd->sd_disk;
1375 if (disk->d_state != G_RAID_DISK_S_ACTIVE &&
1376 disk->d_state != G_RAID_DISK_S_FAILED) {
1377 G_RAID_LOGREQ(0, bp, "Warning! I/O request to a disk in a "
1378 "wrong state (%s)!", g_raid_disk_state2str(disk->d_state));
1379 goto nodisk;
1380 }
1381
1382 cp = disk->d_consumer;
1383 bp->bio_from = cp;
1384 bp->bio_to = cp->provider;
1385 cp->index++;
1386
1387 /* Update average disks load. */
1388 TAILQ_FOREACH(tdisk, &sd->sd_softc->sc_disks, d_next) {
1389 if (tdisk->d_consumer == NULL)
1390 tdisk->d_load = 0;
1391 else
1392 tdisk->d_load = (tdisk->d_consumer->index *
1393 G_RAID_SUBDISK_LOAD_SCALE + tdisk->d_load * 7) / 8;
1394 }
1395
1396 disk->d_last_offset = bp->bio_offset + bp->bio_length;
1397 if (dumping) {
1398 G_RAID_LOGREQ(3, bp, "Sending dumping request.");
1399 if (bp->bio_cmd == BIO_WRITE) {
1400 bp->bio_error = g_raid_subdisk_kerneldump(sd,
1401 bp->bio_data, 0, bp->bio_offset, bp->bio_length);
1402 } else
1403 bp->bio_error = EOPNOTSUPP;
1404 g_raid_disk_done(bp);
1405 } else {
1406 bp->bio_done = g_raid_disk_done;
1407 bp->bio_offset += sd->sd_offset;
1408 G_RAID_LOGREQ(3, bp, "Sending request.");
1409 g_io_request(bp, cp);
1410 }
1411 }
1412
1413 int
1414 g_raid_subdisk_kerneldump(struct g_raid_subdisk *sd,
1415 void *virtual, vm_offset_t physical, off_t offset, size_t length)
1416 {
1417
1418 if (sd->sd_disk == NULL)
1419 return (ENXIO);
1420 if (sd->sd_disk->d_kd.di.dumper == NULL)
1421 return (EOPNOTSUPP);
1422 return (dump_write(&sd->sd_disk->d_kd.di,
1423 virtual, physical,
1424 sd->sd_disk->d_kd.di.mediaoffset + sd->sd_offset + offset,
1425 length));
1426 }
1427
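/*
 * Fast completion path: a finished bio is only queued back to the node
 * here (possibly from the GEOM completion context); the real accounting
 * happens in g_raid_disk_done_request() on the worker thread.
 */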
1428 static void
1429 g_raid_disk_done(struct bio *bp)
1430 {
1431 struct g_raid_softc *sc;
1432 struct g_raid_subdisk *sd;
1433
1434 sd = bp->bio_caller1;
1435 sc = sd->sd_softc;
1436 mtx_lock(&sc->sc_queue_mtx);
1437 bioq_insert_tail(&sc->sc_queue, bp);
1438 mtx_unlock(&sc->sc_queue_mtx);
1439 if (!dumping)
1440 wakeup(sc);
1441 }
1442
1443 static void
1444 g_raid_disk_done_request(struct bio *bp)
1445 {
1446 struct g_raid_softc *sc;
1447 struct g_raid_disk *disk;
1448 struct g_raid_subdisk *sd;
1449 struct g_raid_volume *vol;
1450
1451 g_topology_assert_not();
1452
1453 G_RAID_LOGREQ(3, bp, "Disk request done: %d.", bp->bio_error);
1454 sd = bp->bio_caller1;
1455 sc = sd->sd_softc;
1456 vol = sd->sd_volume;
1457 if (bp->bio_from != NULL) {
1458 bp->bio_from->index--;
1459 disk = bp->bio_from->private;
1460 if (disk == NULL)
1461 g_raid_kill_consumer(sc, bp->bio_from);
1462 }
1463 bp->bio_offset -= sd->sd_offset;
1464
1465 G_RAID_TR_IODONE(vol->v_tr, sd, bp);
1466 }
1467
1468 static void
1469 g_raid_handle_event(struct g_raid_softc *sc, struct g_raid_event *ep)
1470 {
1471
1472 if ((ep->e_flags & G_RAID_EVENT_VOLUME) != 0)
1473 ep->e_error = g_raid_update_volume(ep->e_tgt, ep->e_event);
1474 else if ((ep->e_flags & G_RAID_EVENT_DISK) != 0)
1475 ep->e_error = g_raid_update_disk(ep->e_tgt, ep->e_event);
1476 else if ((ep->e_flags & G_RAID_EVENT_SUBDISK) != 0)
1477 ep->e_error = g_raid_update_subdisk(ep->e_tgt, ep->e_event);
1478 else
1479 ep->e_error = g_raid_update_node(ep->e_tgt, ep->e_event);
1480 if ((ep->e_flags & G_RAID_EVENT_WAIT) == 0) {
1481 KASSERT(ep->e_error == 0,
1482 ("Error cannot be handled."));
1483 g_raid_event_free(ep);
1484 } else {
1485 ep->e_flags |= G_RAID_EVENT_DONE;
1486 G_RAID_DEBUG1(4, sc, "Waking up %p.", ep);
1487 mtx_lock(&sc->sc_queue_mtx);
1488 wakeup(ep);
1489 mtx_unlock(&sc->sc_queue_mtx);
1490 }
1491 }
1492
1493 /*
1494 * Worker thread.
1495 */
1496 static void
1497 g_raid_worker(void *arg)
1498 {
1499 struct g_raid_softc *sc;
1500 struct g_raid_event *ep;
1501 struct g_raid_volume *vol;
1502 struct bio *bp;
1503 struct timeval now, t;
1504 int timeout, rv;
1505
1506 sc = arg;
1507 thread_lock(curthread);
1508 sched_prio(curthread, PRIBIO);
1509 thread_unlock(curthread);
1510
1511 sx_xlock(&sc->sc_lock);
1512 for (;;) {
1513 mtx_lock(&sc->sc_queue_mtx);
1514 /*
1515 * First take a look at events.
1516 * This is important to handle events before any I/O requests.
1517 */
1518 bp = NULL;
1519 vol = NULL;
1520 rv = 0;
1521 ep = TAILQ_FIRST(&sc->sc_events);
1522 if (ep != NULL)
1523 TAILQ_REMOVE(&sc->sc_events, ep, e_next);
1524 else if ((bp = bioq_takefirst(&sc->sc_queue)) != NULL)
1525 ;
1526 else {
1527 getmicrouptime(&now);
1528 t = now;
1529 TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) {
1530 if (bioq_first(&vol->v_inflight) == NULL &&
1531 vol->v_tr &&
1532 timevalcmp(&vol->v_last_done, &t, < ))
1533 t = vol->v_last_done;
1534 }
1535 timevalsub(&t, &now);
1536 timeout = g_raid_idle_threshold +
1537 t.tv_sec * 1000000 + t.tv_usec;
1538 if (timeout > 0) {
1539 /*
1540 * Two steps to avoid overflows at HZ=1000
1541 * and idle timeouts > 2.1s. Some rounding
1542 * errors can occur, but they are < 1 tick,
1543 * which is deemed to be close enough for
1544 * this purpose.
1545 */
1546 int micpertic = 1000000 / hz;
1547 timeout = (timeout + micpertic - 1) / micpertic;
1548 sx_xunlock(&sc->sc_lock);
1549 MSLEEP(rv, sc, &sc->sc_queue_mtx,
1550 PRIBIO | PDROP, "-", timeout);
1551 sx_xlock(&sc->sc_lock);
1552 goto process;
1553 } else
1554 rv = EWOULDBLOCK;
1555 }
1556 mtx_unlock(&sc->sc_queue_mtx);
1557 process:
1558 if (ep != NULL) {
1559 g_raid_handle_event(sc, ep);
1560 } else if (bp != NULL) {
1561 if (bp->bio_to != NULL &&
1562 bp->bio_to->geom == sc->sc_geom)
1563 g_raid_start_request(bp);
1564 else
1565 g_raid_disk_done_request(bp);
1566 } else if (rv == EWOULDBLOCK) {
1567 TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) {
1568 g_raid_clean(vol, -1);
1569 if (bioq_first(&vol->v_inflight) == NULL &&
1570 vol->v_tr) {
1571 t.tv_sec = g_raid_idle_threshold / 1000000;
1572 t.tv_usec = g_raid_idle_threshold % 1000000;
1573 timevaladd(&t, &vol->v_last_done);
1574 getmicrouptime(&now);
1575 if (timevalcmp(&t, &now, <= )) {
1576 G_RAID_TR_IDLE(vol->v_tr);
1577 vol->v_last_done = now;
1578 }
1579 }
1580 }
1581 }
1582 if (sc->sc_stopping == G_RAID_DESTROY_HARD)
1583 g_raid_destroy_node(sc, 1); /* May not return. */
1584 }
1585 }
1586
1587 static void
1588 g_raid_poll(struct g_raid_softc *sc)
1589 {
1590 struct g_raid_event *ep;
1591 struct bio *bp;
1592
1593 sx_xlock(&sc->sc_lock);
1594 mtx_lock(&sc->sc_queue_mtx);
1595 /*
1596 * First take a look at events.
1597 * This is important to handle events before any I/O requests.
1598 */
1599 ep = TAILQ_FIRST(&sc->sc_events);
1600 if (ep != NULL) {
1601 TAILQ_REMOVE(&sc->sc_events, ep, e_next);
1602 mtx_unlock(&sc->sc_queue_mtx);
1603 g_raid_handle_event(sc, ep);
1604 goto out;
1605 }
1606 bp = bioq_takefirst(&sc->sc_queue);
1607 if (bp != NULL) {
1608 mtx_unlock(&sc->sc_queue_mtx);
1609 if (bp->bio_from == NULL ||
1610 bp->bio_from->geom != sc->sc_geom)
1611 g_raid_start_request(bp);
1612 else
1613 g_raid_disk_done_request(bp);
1614 }
1615 out:
1616 sx_xunlock(&sc->sc_lock);
1617 }
1618
1619 static void
1620 g_raid_launch_provider(struct g_raid_volume *vol)
1621 {
1622 struct g_raid_disk *disk;
1623 struct g_raid_subdisk *sd;
1624 struct g_raid_softc *sc;
1625 struct g_provider *pp;
1626 char name[G_RAID_MAX_VOLUMENAME];
1627 off_t off;
1628 int i;
1629
1630 sc = vol->v_softc;
1631 sx_assert(&sc->sc_lock, SX_LOCKED);
1632
1633 g_topology_lock();
1634 /* Try to name provider with volume name. */
1635 snprintf(name, sizeof(name), "raid/%s", vol->v_name);
1636 if (g_raid_name_format == 0 || vol->v_name[0] == 0 ||
1637 g_provider_by_name(name) != NULL) {
1638 /* Otherwise use sequential volume number. */
1639 snprintf(name, sizeof(name), "raid/r%d", vol->v_global_id);
1640 }
1641
1642 pp = g_new_providerf(sc->sc_geom, "%s", name);
1643 pp->flags |= G_PF_DIRECT_RECEIVE;
1644 if (vol->v_tr->tro_class->trc_accept_unmapped) {
1645 pp->flags |= G_PF_ACCEPT_UNMAPPED;
1646 for (i = 0; i < vol->v_disks_count; i++) {
1647 sd = &vol->v_subdisks[i];
1648 if (sd->sd_state == G_RAID_SUBDISK_S_NONE)
1649 continue;
1650 if ((sd->sd_disk->d_consumer->provider->flags &
1651 G_PF_ACCEPT_UNMAPPED) == 0)
1652 pp->flags &= ~G_PF_ACCEPT_UNMAPPED;
1653 }
1654 }
1655 pp->private = vol;
1656 pp->mediasize = vol->v_mediasize;
1657 pp->sectorsize = vol->v_sectorsize;
1658 pp->stripesize = 0;
1659 pp->stripeoffset = 0;
1660 if (vol->v_raid_level == G_RAID_VOLUME_RL_RAID1 ||
1661 vol->v_raid_level == G_RAID_VOLUME_RL_RAID3 ||
1662 vol->v_raid_level == G_RAID_VOLUME_RL_SINGLE ||
1663 vol->v_raid_level == G_RAID_VOLUME_RL_CONCAT) {
1664 if ((disk = vol->v_subdisks[0].sd_disk) != NULL &&
1665 disk->d_consumer != NULL &&
1666 disk->d_consumer->provider != NULL) {
1667 pp->stripesize = disk->d_consumer->provider->stripesize;
1668 off = disk->d_consumer->provider->stripeoffset;
1669 pp->stripeoffset = off + vol->v_subdisks[0].sd_offset;
1670 if (off > 0)
1671 pp->stripeoffset %= off;
1672 }
1673 if (vol->v_raid_level == G_RAID_VOLUME_RL_RAID3) {
1674 pp->stripesize *= (vol->v_disks_count - 1);
1675 pp->stripeoffset *= (vol->v_disks_count - 1);
1676 }
1677 } else
1678 pp->stripesize = vol->v_strip_size;
1679 vol->v_provider = pp;
1680 g_error_provider(pp, 0);
1681 g_topology_unlock();
1682 G_RAID_DEBUG1(0, sc, "Provider %s for volume %s created.",
1683 pp->name, vol->v_name);
1684 }
1685
1686 static void
1687 g_raid_destroy_provider(struct g_raid_volume *vol)
1688 {
1689 struct g_raid_softc *sc;
1690 struct g_provider *pp;
1691 struct bio *bp, *tmp;
1692
1693 g_topology_assert_not();
1694 sc = vol->v_softc;
1695 pp = vol->v_provider;
1696 KASSERT(pp != NULL, ("NULL provider (volume=%s).", vol->v_name));
1697
1698 g_topology_lock();
1699 g_error_provider(pp, ENXIO);
1700 mtx_lock(&sc->sc_queue_mtx);
1701 TAILQ_FOREACH_SAFE(bp, &sc->sc_queue.queue, bio_queue, tmp) {
1702 if (bp->bio_to != pp)
1703 continue;
1704 bioq_remove(&sc->sc_queue, bp);
1705 g_io_deliver(bp, ENXIO);
1706 }
1707 mtx_unlock(&sc->sc_queue_mtx);
1708 G_RAID_DEBUG1(0, sc, "Provider %s for volume %s destroyed.",
1709 pp->name, vol->v_name);
1710 g_wither_provider(pp, ENXIO);
1711 g_topology_unlock();
1712 vol->v_provider = NULL;
1713 }
1714
1715 /*
1716 * Update device state.
1717 */
1718 static int
1719 g_raid_update_volume(struct g_raid_volume *vol, u_int event)
1720 {
1721 struct g_raid_softc *sc;
1722
1723 sc = vol->v_softc;
1724 sx_assert(&sc->sc_lock, SX_XLOCKED);
1725
1726 G_RAID_DEBUG1(2, sc, "Event %s for volume %s.",
1727 g_raid_volume_event2str(event),
1728 vol->v_name);
1729 switch (event) {
1730 case G_RAID_VOLUME_E_DOWN:
1731 if (vol->v_provider != NULL)
1732 g_raid_destroy_provider(vol);
1733 break;
1734 case G_RAID_VOLUME_E_UP:
1735 if (vol->v_provider == NULL)
1736 g_raid_launch_provider(vol);
1737 break;
1738 case G_RAID_VOLUME_E_START:
1739 if (vol->v_tr)
1740 G_RAID_TR_START(vol->v_tr);
1741 return (0);
1742 default:
1743 if (sc->sc_md)
1744 G_RAID_MD_VOLUME_EVENT(sc->sc_md, vol, event);
1745 return (0);
1746 }
1747
1748 /* Manage root mount release. */
1749 if (vol->v_starting) {
1750 vol->v_starting = 0;
1751 G_RAID_DEBUG1(1, sc, "root_mount_rel %p", vol->v_rootmount);
1752 root_mount_rel(vol->v_rootmount);
1753 vol->v_rootmount = NULL;
1754 }
1755 if (vol->v_stopping && vol->v_provider_open == 0)
1756 g_raid_destroy_volume(vol);
1757 return (0);
1758 }
1759
1760 /*
1761 * Update subdisk state.
1762 */
1763 static int
1764 g_raid_update_subdisk(struct g_raid_subdisk *sd, u_int event)
1765 {
1766 struct g_raid_softc *sc;
1767 struct g_raid_volume *vol;
1768
1769 sc = sd->sd_softc;
1770 vol = sd->sd_volume;
1771 sx_assert(&sc->sc_lock, SX_XLOCKED);
1772
1773 G_RAID_DEBUG1(2, sc, "Event %s for subdisk %s:%d-%s.",
1774 g_raid_subdisk_event2str(event),
1775 vol->v_name, sd->sd_pos,
1776 sd->sd_disk ? g_raid_get_diskname(sd->sd_disk) : "[none]");
1777 if (vol->v_tr)
1778 G_RAID_TR_EVENT(vol->v_tr, sd, event);
1779
1780 return (0);
1781 }
1782
1783 /*
1784 * Update disk state.
1785 */
1786 static int
1787 g_raid_update_disk(struct g_raid_disk *disk, u_int event)
1788 {
1789 struct g_raid_softc *sc;
1790
1791 sc = disk->d_softc;
1792 sx_assert(&sc->sc_lock, SX_XLOCKED);
1793
1794 G_RAID_DEBUG1(2, sc, "Event %s for disk %s.",
1795 g_raid_disk_event2str(event),
1796 g_raid_get_diskname(disk));
1797
1798 if (sc->sc_md)
1799 G_RAID_MD_EVENT(sc->sc_md, disk, event);
1800 return (0);
1801 }
1802
1803 /*
1804 * Node event.
1805 */
1806 static int
1807 g_raid_update_node(struct g_raid_softc *sc, u_int event)
1808 {
1809 sx_assert(&sc->sc_lock, SX_XLOCKED);
1810
1811 G_RAID_DEBUG1(2, sc, "Event %s for the array.",
1812 g_raid_node_event2str(event));
1813
1814 if (event == G_RAID_NODE_E_WAKE)
1815 return (0);
1816 if (sc->sc_md)
1817 G_RAID_MD_EVENT(sc->sc_md, NULL, event);
1818 return (0);
1819 }
1820
1821 static int
1822 g_raid_access(struct g_provider *pp, int acr, int acw, int ace)
1823 {
1824 struct g_raid_volume *vol;
1825 struct g_raid_softc *sc;
1826 int dcw, opens, error = 0;
1827
1828 g_topology_assert();
1829 sc = pp->geom->softc;
1830 vol = pp->private;
1831 KASSERT(sc != NULL, ("NULL softc (provider=%s).", pp->name));
1832 KASSERT(vol != NULL, ("NULL volume (provider=%s).", pp->name));
1833
1834 G_RAID_DEBUG1(2, sc, "Access request for %s: r%dw%de%d.", pp->name,
1835 acr, acw, ace);
1836 dcw = pp->acw + acw;
1837
1838 g_topology_unlock();
1839 sx_xlock(&sc->sc_lock);
1840 /* Deny new opens while dying. */
1841 if (sc->sc_stopping != 0 && (acr > 0 || acw > 0 || ace > 0)) {
1842 error = ENXIO;
1843 goto out;
1844 }
1845 /* Deny write opens for read-only volumes. */
1846 if (vol->v_read_only && acw > 0) {
1847 error = EROFS;
1848 goto out;
1849 }
1850 if (dcw == 0)
1851 g_raid_clean(vol, dcw);
1852 vol->v_provider_open += acr + acw + ace;
1853 /* Handle delayed node destruction. */
1854 if (sc->sc_stopping == G_RAID_DESTROY_DELAYED &&
1855 vol->v_provider_open == 0) {
1856 /* Count open volumes. */
1857 opens = g_raid_nopens(sc);
1858 if (opens == 0) {
1859 sc->sc_stopping = G_RAID_DESTROY_HARD;
1860 /* Wake up the worker to make it self-destruct. */
1861 g_raid_event_send(sc, G_RAID_NODE_E_WAKE, 0);
1862 }
1863 }
1864 /* Handle open volume destruction. */
1865 if (vol->v_stopping && vol->v_provider_open == 0)
1866 g_raid_destroy_volume(vol);
1867 out:
1868 sx_xunlock(&sc->sc_lock);
1869 g_topology_lock();
1870 return (error);
1871 }
1872
1873 struct g_raid_softc *
1874 g_raid_create_node(struct g_class *mp,
1875 const char *name, struct g_raid_md_object *md)
1876 {
1877 struct g_raid_softc *sc;
1878 struct g_geom *gp;
1879 int error;
1880
1881 g_topology_assert();
1882 G_RAID_DEBUG(1, "Creating array %s.", name);
1883
1884 gp = g_new_geomf(mp, "%s", name);
1885 sc = malloc(sizeof(*sc), M_RAID, M_WAITOK | M_ZERO);
1886 gp->start = g_raid_start;
1887 gp->orphan = g_raid_orphan;
1888 gp->access = g_raid_access;
1889 gp->dumpconf = g_raid_dumpconf;
1890
1891 sc->sc_md = md;
1892 sc->sc_geom = gp;
1893 sc->sc_flags = 0;
1894 TAILQ_INIT(&sc->sc_volumes);
1895 TAILQ_INIT(&sc->sc_disks);
1896 sx_init(&sc->sc_lock, "graid:lock");
1897 mtx_init(&sc->sc_queue_mtx, "graid:queue", NULL, MTX_DEF);
1898 TAILQ_INIT(&sc->sc_events);
1899 bioq_init(&sc->sc_queue);
1900 gp->softc = sc;
1901 error = kproc_create(g_raid_worker, sc, &sc->sc_worker, 0, 0,
1902 "g_raid %s", name);
1903 if (error != 0) {
1904 G_RAID_DEBUG(0, "Cannot create kernel thread for %s.", name);
1905 mtx_destroy(&sc->sc_queue_mtx);
1906 sx_destroy(&sc->sc_lock);
1907 g_destroy_geom(sc->sc_geom);
1908 free(sc, M_RAID);
1909 return (NULL);
1910 }
1911
1912 G_RAID_DEBUG1(0, sc, "Array %s created.", name);
1913 return (sc);
1914 }
1915
1916 struct g_raid_volume *
1917 g_raid_create_volume(struct g_raid_softc *sc, const char *name, int id)
1918 {
1919 struct g_raid_volume *vol, *vol1;
1920 int i;
1921
1922 G_RAID_DEBUG1(1, sc, "Creating volume %s.", name);
1923 vol = malloc(sizeof(*vol), M_RAID, M_WAITOK | M_ZERO);
1924 vol->v_softc = sc;
1925 strlcpy(vol->v_name, name, G_RAID_MAX_VOLUMENAME);
1926 vol->v_state = G_RAID_VOLUME_S_STARTING;
1927 vol->v_raid_level = G_RAID_VOLUME_RL_UNKNOWN;
1928 vol->v_raid_level_qualifier = G_RAID_VOLUME_RLQ_UNKNOWN;
1929 vol->v_rotate_parity = 1;
1930 bioq_init(&vol->v_inflight);
1931 bioq_init(&vol->v_locked);
1932 LIST_INIT(&vol->v_locks);
1933 for (i = 0; i < G_RAID_MAX_SUBDISKS; i++) {
1934 vol->v_subdisks[i].sd_softc = sc;
1935 vol->v_subdisks[i].sd_volume = vol;
1936 vol->v_subdisks[i].sd_pos = i;
1937 vol->v_subdisks[i].sd_state = G_RAID_SUBDISK_S_NONE;
1938 }
1939
1940 /* Find free ID for this volume. */
1941 g_topology_lock();
1942 vol1 = vol;
1943 if (id >= 0) {
1944 LIST_FOREACH(vol1, &g_raid_volumes, v_global_next) {
1945 if (vol1->v_global_id == id)
1946 break;
1947 }
1948 }
1949 if (vol1 != NULL) {
1950 for (id = 0; ; id++) {
1951 LIST_FOREACH(vol1, &g_raid_volumes, v_global_next) {
1952 if (vol1->v_global_id == id)
1953 break;
1954 }
1955 if (vol1 == NULL)
1956 break;
1957 }
1958 }
1959 vol->v_global_id = id;
1960 LIST_INSERT_HEAD(&g_raid_volumes, vol, v_global_next);
1961 g_topology_unlock();
1962
1963 /* Delay root mounting. */
1964 vol->v_rootmount = root_mount_hold("GRAID");
1965 G_RAID_DEBUG1(1, sc, "root_mount_hold %p", vol->v_rootmount);
1966 vol->v_starting = 1;
1967 TAILQ_INSERT_TAIL(&sc->sc_volumes, vol, v_next);
1968 return (vol);
1969 }
1970
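/* Allocate a new disk object and link it into the array's disk list. */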
1971 struct g_raid_disk *
1972 g_raid_create_disk(struct g_raid_softc *sc)
1973 {
1974 struct g_raid_disk *disk;
1975
1976 G_RAID_DEBUG1(1, sc, "Creating disk.");
1977 disk = malloc(sizeof(*disk), M_RAID, M_WAITOK | M_ZERO);
1978 disk->d_softc = sc;
1979 disk->d_state = G_RAID_DISK_S_NONE;
1980 TAILQ_INIT(&disk->d_subdisks);
1981 TAILQ_INSERT_TAIL(&sc->sc_disks, disk, d_next);
1982 return (disk);
1983 }
1984
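/*
 * Pick a transformation (RAID level) module for the volume: taste the
 * registered TR classes, kept sorted by priority, until one of them
 * accepts the volume.
 */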
int
g_raid_start_volume(struct g_raid_volume *vol)
1986 {
1987 struct g_raid_tr_class *class;
1988 struct g_raid_tr_object *obj;
1989 int status;
1990
1991 G_RAID_DEBUG1(2, vol->v_softc, "Starting volume %s.", vol->v_name);
1992 LIST_FOREACH(class, &g_raid_tr_classes, trc_list) {
1993 if (!class->trc_enable)
1994 continue;
1995 G_RAID_DEBUG1(2, vol->v_softc,
1996 "Tasting volume %s for %s transformation.",
1997 vol->v_name, class->name);
1998 obj = (void *)kobj_create((kobj_class_t)class, M_RAID,
1999 M_WAITOK);
2000 obj->tro_class = class;
2001 obj->tro_volume = vol;
2002 status = G_RAID_TR_TASTE(obj, vol);
2003 if (status != G_RAID_TR_TASTE_FAIL)
2004 break;
2005 kobj_delete((kobj_t)obj, M_RAID);
2006 }
2007 if (class == NULL) {
2008 G_RAID_DEBUG1(0, vol->v_softc,
2009 "No transformation module found for %s.",
2010 vol->v_name);
2011 vol->v_tr = NULL;
2012 g_raid_change_volume_state(vol, G_RAID_VOLUME_S_UNSUPPORTED);
2013 g_raid_event_send(vol, G_RAID_VOLUME_E_DOWN,
2014 G_RAID_EVENT_VOLUME);
2015 return (-1);
2016 }
2017 G_RAID_DEBUG1(2, vol->v_softc,
2018 "Transformation module %s chosen for %s.",
2019 class->name, vol->v_name);
2020 vol->v_tr = obj;
2021 return (0);
2022 }
2023
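/*
 * Tear down a whole array node: destroy its volumes and disks, free the
 * metadata object, and wither the geom.  When called from the worker
 * thread itself ('worker' != 0), also release the locks and exit the
 * thread; otherwise just wake the worker so it can finish the job.
 */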
2024 int
2025 g_raid_destroy_node(struct g_raid_softc *sc, int worker)
2026 {
2027 struct g_raid_volume *vol, *tmpv;
2028 struct g_raid_disk *disk, *tmpd;
2029 int error = 0;
2030
2031 sc->sc_stopping = G_RAID_DESTROY_HARD;
2032 TAILQ_FOREACH_SAFE(vol, &sc->sc_volumes, v_next, tmpv) {
2033 if (g_raid_destroy_volume(vol))
2034 error = EBUSY;
2035 }
2036 if (error)
2037 return (error);
2038 TAILQ_FOREACH_SAFE(disk, &sc->sc_disks, d_next, tmpd) {
2039 if (g_raid_destroy_disk(disk))
2040 error = EBUSY;
2041 }
2042 if (error)
2043 return (error);
2044 if (sc->sc_md) {
2045 G_RAID_MD_FREE(sc->sc_md);
2046 kobj_delete((kobj_t)sc->sc_md, M_RAID);
2047 sc->sc_md = NULL;
2048 }
2049 if (sc->sc_geom != NULL) {
2050 G_RAID_DEBUG1(0, sc, "Array %s destroyed.", sc->sc_name);
2051 g_topology_lock();
2052 sc->sc_geom->softc = NULL;
2053 g_wither_geom(sc->sc_geom, ENXIO);
2054 g_topology_unlock();
2055 sc->sc_geom = NULL;
2056 } else
2057 G_RAID_DEBUG(1, "Array destroyed.");
2058 if (worker) {
2059 g_raid_event_cancel(sc, sc);
2060 mtx_destroy(&sc->sc_queue_mtx);
2061 sx_xunlock(&sc->sc_lock);
2062 sx_destroy(&sc->sc_lock);
2063 wakeup(&sc->sc_stopping);
2064 free(sc, M_RAID);
2065 curthread->td_pflags &= ~TDP_GEOM;
2066 G_RAID_DEBUG(1, "Thread exiting.");
2067 kproc_exit(0);
2068 } else {
		/* Wake up worker to make it self-destruct. */
2070 g_raid_event_send(sc, G_RAID_NODE_E_WAKE, 0);
2071 }
2072 return (0);
2073 }
2074
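/*
 * Destroy a volume.  Returns EBUSY (without freeing anything) while the
 * transformation module is still stopping, events are pending, or the
 * provider remains open; callers retry once the volume goes idle.
 */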
2075 int
2076 g_raid_destroy_volume(struct g_raid_volume *vol)
2077 {
2078 struct g_raid_softc *sc;
2079 struct g_raid_disk *disk;
2080 int i;
2081
2082 sc = vol->v_softc;
2083 G_RAID_DEBUG1(2, sc, "Destroying volume %s.", vol->v_name);
2084 vol->v_stopping = 1;
2085 if (vol->v_state != G_RAID_VOLUME_S_STOPPED) {
2086 if (vol->v_tr) {
2087 G_RAID_TR_STOP(vol->v_tr);
2088 return (EBUSY);
2089 } else
2090 vol->v_state = G_RAID_VOLUME_S_STOPPED;
2091 }
2092 if (g_raid_event_check(sc, vol) != 0)
2093 return (EBUSY);
2094 if (vol->v_provider != NULL)
2095 return (EBUSY);
2096 if (vol->v_provider_open != 0)
2097 return (EBUSY);
2098 if (vol->v_tr) {
2099 G_RAID_TR_FREE(vol->v_tr);
2100 kobj_delete((kobj_t)vol->v_tr, M_RAID);
2101 vol->v_tr = NULL;
2102 }
2103 if (vol->v_rootmount)
2104 root_mount_rel(vol->v_rootmount);
2105 g_topology_lock();
2106 LIST_REMOVE(vol, v_global_next);
2107 g_topology_unlock();
2108 TAILQ_REMOVE(&sc->sc_volumes, vol, v_next);
2109 for (i = 0; i < G_RAID_MAX_SUBDISKS; i++) {
2110 g_raid_event_cancel(sc, &vol->v_subdisks[i]);
2111 disk = vol->v_subdisks[i].sd_disk;
2112 if (disk == NULL)
2113 continue;
2114 TAILQ_REMOVE(&disk->d_subdisks, &vol->v_subdisks[i], sd_next);
2115 }
2116 G_RAID_DEBUG1(2, sc, "Volume %s destroyed.", vol->v_name);
2117 if (sc->sc_md)
2118 G_RAID_MD_FREE_VOLUME(sc->sc_md, vol);
2119 g_raid_event_cancel(sc, vol);
2120 free(vol, M_RAID);
2121 if (sc->sc_stopping == G_RAID_DESTROY_HARD) {
		/* Wake up worker to let it self-destruct. */
2123 g_raid_event_send(sc, G_RAID_NODE_E_WAKE, 0);
2124 }
2125 return (0);
2126 }
2127
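/*
 * Destroy a disk object: kill its consumer, disconnect and detach all
 * of its subdisks, and free the metadata module's per-disk state.
 */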
2128 int
2129 g_raid_destroy_disk(struct g_raid_disk *disk)
2130 {
2131 struct g_raid_softc *sc;
2132 struct g_raid_subdisk *sd, *tmp;
2133
2134 sc = disk->d_softc;
2135 G_RAID_DEBUG1(2, sc, "Destroying disk.");
2136 if (disk->d_consumer) {
2137 g_raid_kill_consumer(sc, disk->d_consumer);
2138 disk->d_consumer = NULL;
2139 }
2140 TAILQ_FOREACH_SAFE(sd, &disk->d_subdisks, sd_next, tmp) {
2141 g_raid_change_subdisk_state(sd, G_RAID_SUBDISK_S_NONE);
2142 g_raid_event_send(sd, G_RAID_SUBDISK_E_DISCONNECTED,
2143 G_RAID_EVENT_SUBDISK);
2144 TAILQ_REMOVE(&disk->d_subdisks, sd, sd_next);
2145 sd->sd_disk = NULL;
2146 }
2147 TAILQ_REMOVE(&sc->sc_disks, disk, d_next);
2148 if (sc->sc_md)
2149 G_RAID_MD_FREE_DISK(sc->sc_md, disk);
2150 g_raid_event_cancel(sc, disk);
2151 free(disk, M_RAID);
2152 return (0);
2153 }
2154
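/*
 * Destroy the array node, honouring the requested mode: SOFT fails with
 * EBUSY if any volume is open, DELAYED defers destruction until the
 * last close, and HARD proceeds regardless.  In the HARD case the
 * function sleeps (up to three seconds) until the worker has finished
 * the job.
 */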
2155 int
2156 g_raid_destroy(struct g_raid_softc *sc, int how)
2157 {
2158 int error, opens;
2159
2160 g_topology_assert_not();
2161 if (sc == NULL)
2162 return (ENXIO);
2163 sx_assert(&sc->sc_lock, SX_XLOCKED);
2164
2165 /* Count open volumes. */
2166 opens = g_raid_nopens(sc);
2167
2168 /* React on some opened volumes. */
2169 if (opens > 0) {
2170 switch (how) {
2171 case G_RAID_DESTROY_SOFT:
2172 G_RAID_DEBUG1(1, sc,
2173 "%d volumes are still open.",
2174 opens);
2175 sx_xunlock(&sc->sc_lock);
2176 return (EBUSY);
2177 case G_RAID_DESTROY_DELAYED:
2178 G_RAID_DEBUG1(1, sc,
2179 "Array will be destroyed on last close.");
2180 sc->sc_stopping = G_RAID_DESTROY_DELAYED;
2181 sx_xunlock(&sc->sc_lock);
2182 return (EBUSY);
2183 case G_RAID_DESTROY_HARD:
2184 G_RAID_DEBUG1(1, sc,
2185 "%d volumes are still open.",
2186 opens);
2187 }
2188 }
2189
2190 /* Mark node for destruction. */
2191 sc->sc_stopping = G_RAID_DESTROY_HARD;
	/* Wake up worker to let it self-destruct. */
2193 g_raid_event_send(sc, G_RAID_NODE_E_WAKE, 0);
2194 /* Sleep until node destroyed. */
2195 error = sx_sleep(&sc->sc_stopping, &sc->sc_lock,
2196 PRIBIO | PDROP, "r:destroy", hz * 3);
2197 return (error == EWOULDBLOCK ? EBUSY : 0);
2198 }
2199
2200 static void
2201 g_raid_taste_orphan(struct g_consumer *cp)
2202 {
2203
2204 KASSERT(1 == 0, ("%s called while tasting %s.", __func__,
2205 cp->provider->name));
2206 }
2207
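/*
 * Taste a provider for RAID metadata: attach a temporary geom and
 * consumer to it and offer it to each registered metadata class, in
 * priority order, until one of them claims it.
 */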
2208 static struct g_geom *
2209 g_raid_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
2210 {
2211 struct g_consumer *cp;
2212 struct g_geom *gp, *geom;
2213 struct g_raid_md_class *class;
2214 struct g_raid_md_object *obj;
2215 int status;
2216
2217 g_topology_assert();
2218 g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
2219 if (!g_raid_enable)
2220 return (NULL);
2221 G_RAID_DEBUG(2, "Tasting provider %s.", pp->name);
2222
2223 geom = NULL;
2224 status = G_RAID_MD_TASTE_FAIL;
2225 gp = g_new_geomf(mp, "raid:taste");
	/*
	 * This orphan function should never be called.
	 */
2229 gp->orphan = g_raid_taste_orphan;
2230 cp = g_new_consumer(gp);
2231 cp->flags |= G_CF_DIRECT_RECEIVE;
2232 g_attach(cp, pp);
2233 if (g_access(cp, 1, 0, 0) != 0)
2234 goto ofail;
2235
2236 LIST_FOREACH(class, &g_raid_md_classes, mdc_list) {
2237 if (!class->mdc_enable)
2238 continue;
2239 G_RAID_DEBUG(2, "Tasting provider %s for %s metadata.",
2240 pp->name, class->name);
2241 obj = (void *)kobj_create((kobj_class_t)class, M_RAID,
2242 M_WAITOK);
2243 obj->mdo_class = class;
2244 status = G_RAID_MD_TASTE(obj, mp, cp, &geom);
2245 if (status != G_RAID_MD_TASTE_NEW)
2246 kobj_delete((kobj_t)obj, M_RAID);
2247 if (status != G_RAID_MD_TASTE_FAIL)
2248 break;
2249 }
2250
2251 if (status == G_RAID_MD_TASTE_FAIL)
2252 (void)g_access(cp, -1, 0, 0);
2253 ofail:
2254 g_detach(cp);
2255 g_destroy_consumer(cp);
2256 g_destroy_geom(gp);
2257 G_RAID_DEBUG(2, "Tasting provider %s done.", pp->name);
2258 return (geom);
2259 }
2260
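/*
 * Create a new array in the metadata format named by 'format', on
 * behalf of a userland (gctl) create request.
 */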
2261 int
2262 g_raid_create_node_format(const char *format, struct gctl_req *req,
2263 struct g_geom **gp)
2264 {
2265 struct g_raid_md_class *class;
2266 struct g_raid_md_object *obj;
2267 int status;
2268
2269 G_RAID_DEBUG(2, "Creating array for %s metadata.", format);
2270 LIST_FOREACH(class, &g_raid_md_classes, mdc_list) {
2271 if (strcasecmp(class->name, format) == 0)
2272 break;
2273 }
2274 if (class == NULL) {
2275 G_RAID_DEBUG(1, "No support for %s metadata.", format);
2276 return (G_RAID_MD_TASTE_FAIL);
2277 }
2278 obj = (void *)kobj_create((kobj_class_t)class, M_RAID,
2279 M_WAITOK);
2280 obj->mdo_class = class;
2281 status = G_RAID_MD_CREATE_REQ(obj, &g_raid_class, req, gp);
2282 if (status != G_RAID_MD_TASTE_NEW)
2283 kobj_delete((kobj_t)obj, M_RAID);
2284 return (status);
2285 }
2286
2287 static int
2288 g_raid_destroy_geom(struct gctl_req *req __unused,
2289 struct g_class *mp __unused, struct g_geom *gp)
2290 {
2291 struct g_raid_softc *sc;
2292 int error;
2293
2294 g_topology_unlock();
2295 sc = gp->softc;
2296 sx_xlock(&sc->sc_lock);
2297 g_cancel_event(sc);
2298 error = g_raid_destroy(gp->softc, G_RAID_DESTROY_SOFT);
2299 g_topology_lock();
2300 return (error);
2301 }
2302
void
g_raid_write_metadata(struct g_raid_softc *sc, struct g_raid_volume *vol,
    struct g_raid_subdisk *sd, struct g_raid_disk *disk)
2305 {
2306
2307 if (sc->sc_stopping == G_RAID_DESTROY_HARD)
2308 return;
2309 if (sc->sc_md)
2310 G_RAID_MD_WRITE(sc->sc_md, vol, sd, disk);
2311 }
2312
void
g_raid_fail_disk(struct g_raid_softc *sc,
    struct g_raid_subdisk *sd, struct g_raid_disk *disk)
2315 {
2316
2317 if (disk == NULL)
2318 disk = sd->sd_disk;
2319 if (disk == NULL) {
2320 G_RAID_DEBUG1(0, sc, "Warning! Fail request to an absent disk!");
2321 return;
2322 }
2323 if (disk->d_state != G_RAID_DISK_S_ACTIVE) {
2324 G_RAID_DEBUG1(0, sc, "Warning! Fail request to a disk in a "
2325 "wrong state (%s)!", g_raid_disk_state2str(disk->d_state));
2326 return;
2327 }
2328 if (sc->sc_md)
2329 G_RAID_MD_FAIL_DISK(sc->sc_md, sd, disk);
2330 }
2331
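/*
 * Dump XML configuration: a per-provider (volume), per-consumer (disk),
 * or per-geom (array) section for GEOM's confxml output.
 */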
2332 static void
2333 g_raid_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
2334 struct g_consumer *cp, struct g_provider *pp)
2335 {
2336 struct g_raid_softc *sc;
2337 struct g_raid_volume *vol;
2338 struct g_raid_subdisk *sd;
2339 struct g_raid_disk *disk;
2340 int i, s;
2341
2342 g_topology_assert();
2343
2344 sc = gp->softc;
2345 if (sc == NULL)
2346 return;
2347 if (pp != NULL) {
2348 vol = pp->private;
2349 g_topology_unlock();
2350 sx_xlock(&sc->sc_lock);
2351 sbuf_printf(sb, "%s<descr>%s %s volume</descr>\n", indent,
2352 sc->sc_md->mdo_class->name,
2353 g_raid_volume_level2str(vol->v_raid_level,
2354 vol->v_raid_level_qualifier));
2355 sbuf_printf(sb, "%s<Label>%s</Label>\n", indent,
2356 vol->v_name);
2357 sbuf_printf(sb, "%s<RAIDLevel>%s</RAIDLevel>\n", indent,
2358 g_raid_volume_level2str(vol->v_raid_level,
2359 vol->v_raid_level_qualifier));
2360 sbuf_printf(sb,
2361 "%s<Transformation>%s</Transformation>\n", indent,
2362 vol->v_tr ? vol->v_tr->tro_class->name : "NONE");
2363 sbuf_printf(sb, "%s<Components>%u</Components>\n", indent,
2364 vol->v_disks_count);
2365 sbuf_printf(sb, "%s<Strip>%u</Strip>\n", indent,
2366 vol->v_strip_size);
2367 sbuf_printf(sb, "%s<State>%s</State>\n", indent,
2368 g_raid_volume_state2str(vol->v_state));
2369 sbuf_printf(sb, "%s<Dirty>%s</Dirty>\n", indent,
2370 vol->v_dirty ? "Yes" : "No");
2371 sbuf_printf(sb, "%s<Subdisks>", indent);
2372 for (i = 0; i < vol->v_disks_count; i++) {
2373 sd = &vol->v_subdisks[i];
2374 if (sd->sd_disk != NULL &&
2375 sd->sd_disk->d_consumer != NULL) {
2376 sbuf_printf(sb, "%s ",
2377 g_raid_get_diskname(sd->sd_disk));
2378 } else {
2379 sbuf_printf(sb, "NONE ");
2380 }
2381 sbuf_printf(sb, "(%s",
2382 g_raid_subdisk_state2str(sd->sd_state));
2383 if (sd->sd_state == G_RAID_SUBDISK_S_REBUILD ||
2384 sd->sd_state == G_RAID_SUBDISK_S_RESYNC) {
2385 sbuf_printf(sb, " %d%%",
2386 (int)(sd->sd_rebuild_pos * 100 /
2387 sd->sd_size));
2388 }
2389 sbuf_printf(sb, ")");
2390 if (i + 1 < vol->v_disks_count)
2391 sbuf_printf(sb, ", ");
2392 }
2393 sbuf_printf(sb, "</Subdisks>\n");
2394 sx_xunlock(&sc->sc_lock);
2395 g_topology_lock();
2396 } else if (cp != NULL) {
2397 disk = cp->private;
2398 if (disk == NULL)
2399 return;
2400 g_topology_unlock();
2401 sx_xlock(&sc->sc_lock);
2402 sbuf_printf(sb, "%s<State>%s", indent,
2403 g_raid_disk_state2str(disk->d_state));
2404 if (!TAILQ_EMPTY(&disk->d_subdisks)) {
2405 sbuf_printf(sb, " (");
2406 TAILQ_FOREACH(sd, &disk->d_subdisks, sd_next) {
2407 sbuf_printf(sb, "%s",
2408 g_raid_subdisk_state2str(sd->sd_state));
2409 if (sd->sd_state == G_RAID_SUBDISK_S_REBUILD ||
2410 sd->sd_state == G_RAID_SUBDISK_S_RESYNC) {
2411 sbuf_printf(sb, " %d%%",
2412 (int)(sd->sd_rebuild_pos * 100 /
2413 sd->sd_size));
2414 }
2415 if (TAILQ_NEXT(sd, sd_next))
2416 sbuf_printf(sb, ", ");
2417 }
2418 sbuf_printf(sb, ")");
2419 }
2420 sbuf_printf(sb, "</State>\n");
2421 sbuf_printf(sb, "%s<Subdisks>", indent);
2422 TAILQ_FOREACH(sd, &disk->d_subdisks, sd_next) {
			sbuf_printf(sb, "r%d(%s):%d@%ju",
			    sd->sd_volume->v_global_id,
			    sd->sd_volume->v_name,
			    sd->sd_pos, (uintmax_t)sd->sd_offset);
2427 if (TAILQ_NEXT(sd, sd_next))
2428 sbuf_printf(sb, ", ");
2429 }
2430 sbuf_printf(sb, "</Subdisks>\n");
2431 sbuf_printf(sb, "%s<ReadErrors>%d</ReadErrors>\n", indent,
2432 disk->d_read_errs);
2433 sx_xunlock(&sc->sc_lock);
2434 g_topology_lock();
2435 } else {
2436 g_topology_unlock();
2437 sx_xlock(&sc->sc_lock);
2438 if (sc->sc_md) {
2439 sbuf_printf(sb, "%s<Metadata>%s</Metadata>\n", indent,
2440 sc->sc_md->mdo_class->name);
2441 }
2442 if (!TAILQ_EMPTY(&sc->sc_volumes)) {
2443 s = 0xff;
2444 TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) {
2445 if (vol->v_state < s)
2446 s = vol->v_state;
2447 }
2448 sbuf_printf(sb, "%s<State>%s</State>\n", indent,
2449 g_raid_volume_state2str(s));
2450 }
2451 sx_xunlock(&sc->sc_lock);
2452 g_topology_lock();
2453 }
2454 }
2455
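/*
 * Shutdown-time eventhandler: after filesystems have been synced, mark
 * every volume clean and schedule delayed destruction of each array,
 * leaving the on-disk metadata in a clean state.
 */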
2456 static void
2457 g_raid_shutdown_post_sync(void *arg, int howto)
2458 {
2459 struct g_class *mp;
2460 struct g_geom *gp, *gp2;
2461 struct g_raid_softc *sc;
2462 struct g_raid_volume *vol;
2463
2464 mp = arg;
2465 g_topology_lock();
2466 g_raid_shutdown = 1;
2467 LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
2468 if ((sc = gp->softc) == NULL)
2469 continue;
2470 g_topology_unlock();
2471 sx_xlock(&sc->sc_lock);
2472 TAILQ_FOREACH(vol, &sc->sc_volumes, v_next)
2473 g_raid_clean(vol, -1);
2474 g_cancel_event(sc);
2475 g_raid_destroy(sc, G_RAID_DESTROY_DELAYED);
2476 g_topology_lock();
2477 }
2478 g_topology_unlock();
2479 }
2480
2481 static void
2482 g_raid_init(struct g_class *mp)
2483 {
2484
2485 g_raid_post_sync = EVENTHANDLER_REGISTER(shutdown_post_sync,
2486 g_raid_shutdown_post_sync, mp, SHUTDOWN_PRI_FIRST);
2487 if (g_raid_post_sync == NULL)
2488 G_RAID_DEBUG(0, "Warning! Cannot register shutdown event.");
2489 g_raid_started = 1;
2490 }
2491
2492 static void
2493 g_raid_fini(struct g_class *mp)
2494 {
2495
2496 if (g_raid_post_sync != NULL)
2497 EVENTHANDLER_DEREGISTER(shutdown_post_sync, g_raid_post_sync);
2498 g_raid_started = 0;
2499 }
2500
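/*
 * Module event handler for metadata class submodules: on load, insert
 * the class into the global list, keeping it sorted by ascending
 * priority, and retaste existing providers.
 */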
2501 int
2502 g_raid_md_modevent(module_t mod, int type, void *arg)
2503 {
2504 struct g_raid_md_class *class, *c, *nc;
2505 int error;
2506
2507 error = 0;
2508 class = arg;
2509 switch (type) {
2510 case MOD_LOAD:
2511 c = LIST_FIRST(&g_raid_md_classes);
2512 if (c == NULL || c->mdc_priority > class->mdc_priority)
2513 LIST_INSERT_HEAD(&g_raid_md_classes, class, mdc_list);
2514 else {
2515 while ((nc = LIST_NEXT(c, mdc_list)) != NULL &&
2516 nc->mdc_priority < class->mdc_priority)
2517 c = nc;
2518 LIST_INSERT_AFTER(c, class, mdc_list);
2519 }
2520 if (g_raid_started)
2521 g_retaste(&g_raid_class);
2522 break;
2523 case MOD_UNLOAD:
2524 LIST_REMOVE(class, mdc_list);
2525 break;
2526 default:
2527 error = EOPNOTSUPP;
2528 break;
2529 }
2530
2531 return (error);
2532 }
2533
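/*
 * Module event handler for transformation class submodules; the list is
 * kept sorted by ascending priority, just as for metadata classes.
 */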
2534 int
2535 g_raid_tr_modevent(module_t mod, int type, void *arg)
2536 {
2537 struct g_raid_tr_class *class, *c, *nc;
2538 int error;
2539
2540 error = 0;
2541 class = arg;
2542 switch (type) {
2543 case MOD_LOAD:
2544 c = LIST_FIRST(&g_raid_tr_classes);
2545 if (c == NULL || c->trc_priority > class->trc_priority)
2546 LIST_INSERT_HEAD(&g_raid_tr_classes, class, trc_list);
2547 else {
2548 while ((nc = LIST_NEXT(c, trc_list)) != NULL &&
2549 nc->trc_priority < class->trc_priority)
2550 c = nc;
2551 LIST_INSERT_AFTER(c, class, trc_list);
2552 }
2553 break;
2554 case MOD_UNLOAD:
2555 LIST_REMOVE(class, trc_list);
2556 break;
2557 default:
2558 error = EOPNOTSUPP;
2559 break;
2560 }
2561
2562 return (error);
2563 }
2564
/*
 * Use a local implementation of DECLARE_GEOM_CLASS(g_raid_class, g_raid)
 * to reduce module priority, allowing submodules to register themselves
 * first.
 */
2569 static moduledata_t g_raid_mod = {
2570 "g_raid",
2571 g_modevent,
2572 &g_raid_class
2573 };
2574 DECLARE_MODULE(g_raid, g_raid_mod, SI_SUB_DRIVERS, SI_ORDER_THIRD);
2575 MODULE_VERSION(geom_raid, 0);