sys/geom/raid/g_raid.c
1 /*-
2 * Copyright (c) 2010 Alexander Motin <mav@FreeBSD.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/kernel.h>
33 #include <sys/module.h>
34 #include <sys/limits.h>
35 #include <sys/lock.h>
36 #include <sys/mutex.h>
37 #include <sys/bio.h>
38 #include <sys/sysctl.h>
39 #include <sys/malloc.h>
40 #include <sys/eventhandler.h>
41 #include <vm/uma.h>
42 #include <geom/geom.h>
43 #include <sys/proc.h>
44 #include <sys/kthread.h>
45 #include <sys/sched.h>
46 #include <geom/raid/g_raid.h>
47 #include "g_raid_md_if.h"
48 #include "g_raid_tr_if.h"
49
50 static MALLOC_DEFINE(M_RAID, "raid_data", "GEOM_RAID Data");
51
52 SYSCTL_DECL(_kern_geom);
53 SYSCTL_NODE(_kern_geom, OID_AUTO, raid, CTLFLAG_RW, 0, "GEOM_RAID stuff");
54 int g_raid_enable = 1;
55 TUNABLE_INT("kern.geom.raid.enable", &g_raid_enable);
56 SYSCTL_INT(_kern_geom_raid, OID_AUTO, enable, CTLFLAG_RW,
57 &g_raid_enable, 0, "Enable on-disk metadata taste");
58 u_int g_raid_aggressive_spare = 0;
59 TUNABLE_INT("kern.geom.raid.aggressive_spare", &g_raid_aggressive_spare);
60 SYSCTL_UINT(_kern_geom_raid, OID_AUTO, aggressive_spare, CTLFLAG_RW,
61 &g_raid_aggressive_spare, 0, "Use disks without metadata as spare");
62 u_int g_raid_debug = 0;
63 TUNABLE_INT("kern.geom.raid.debug", &g_raid_debug);
64 SYSCTL_UINT(_kern_geom_raid, OID_AUTO, debug, CTLFLAG_RW, &g_raid_debug, 0,
65 "Debug level");
66 int g_raid_read_err_thresh = 10;
67 TUNABLE_INT("kern.geom.raid.read_err_thresh", &g_raid_read_err_thresh);
68 SYSCTL_UINT(_kern_geom_raid, OID_AUTO, read_err_thresh, CTLFLAG_RW,
69 &g_raid_read_err_thresh, 0,
70 "Number of read errors equated to disk failure");
71 u_int g_raid_start_timeout = 30;
72 TUNABLE_INT("kern.geom.raid.start_timeout", &g_raid_start_timeout);
73 SYSCTL_UINT(_kern_geom_raid, OID_AUTO, start_timeout, CTLFLAG_RW,
74 &g_raid_start_timeout, 0,
75 "Time to wait for all array components");
76 static u_int g_raid_clean_time = 5;
77 TUNABLE_INT("kern.geom.raid.clean_time", &g_raid_clean_time);
78 SYSCTL_UINT(_kern_geom_raid, OID_AUTO, clean_time, CTLFLAG_RW,
79 &g_raid_clean_time, 0, "Mark volume as clean when idling");
80 static u_int g_raid_disconnect_on_failure = 1;
81 TUNABLE_INT("kern.geom.raid.disconnect_on_failure",
82 &g_raid_disconnect_on_failure);
83 SYSCTL_UINT(_kern_geom_raid, OID_AUTO, disconnect_on_failure, CTLFLAG_RW,
84 &g_raid_disconnect_on_failure, 0, "Disconnect component on I/O failure.");
85 static u_int g_raid_name_format = 0;
86 TUNABLE_INT("kern.geom.raid.name_format", &g_raid_name_format);
87 SYSCTL_UINT(_kern_geom_raid, OID_AUTO, name_format, CTLFLAG_RW,
88 &g_raid_name_format, 0, "Provider name format.");
89 static u_int g_raid_idle_threshold = 1000000;
90 TUNABLE_INT("kern.geom.raid.idle_threshold", &g_raid_idle_threshold);
91 SYSCTL_UINT(_kern_geom_raid, OID_AUTO, idle_threshold, CTLFLAG_RW,
92 &g_raid_idle_threshold, 1000000,
93 "Time in microseconds to consider a volume idle.");
94
95 #define MSLEEP(rv, ident, mtx, priority, wmesg, timeout) do { \
96 G_RAID_DEBUG(4, "%s: Sleeping %p.", __func__, (ident)); \
97 rv = msleep((ident), (mtx), (priority), (wmesg), (timeout)); \
98 G_RAID_DEBUG(4, "%s: Woken up %p.", __func__, (ident)); \
99 } while (0)
100
101 LIST_HEAD(, g_raid_md_class) g_raid_md_classes =
102 LIST_HEAD_INITIALIZER(g_raid_md_classes);
103
104 LIST_HEAD(, g_raid_tr_class) g_raid_tr_classes =
105 LIST_HEAD_INITIALIZER(g_raid_tr_classes);
106
107 LIST_HEAD(, g_raid_volume) g_raid_volumes =
108 LIST_HEAD_INITIALIZER(g_raid_volumes);
109
110 static eventhandler_tag g_raid_post_sync = NULL;
111 static int g_raid_started = 0;
112 static int g_raid_shutdown = 0;
113
114 static int g_raid_destroy_geom(struct gctl_req *req, struct g_class *mp,
115 struct g_geom *gp);
116 static g_taste_t g_raid_taste;
117 static void g_raid_init(struct g_class *mp);
118 static void g_raid_fini(struct g_class *mp);
119
120 struct g_class g_raid_class = {
121 .name = G_RAID_CLASS_NAME,
122 .version = G_VERSION,
123 .ctlreq = g_raid_ctl,
124 .taste = g_raid_taste,
125 .destroy_geom = g_raid_destroy_geom,
126 .init = g_raid_init,
127 .fini = g_raid_fini
128 };
129
130 static void g_raid_destroy_provider(struct g_raid_volume *vol);
131 static int g_raid_update_disk(struct g_raid_disk *disk, u_int event);
132 static int g_raid_update_subdisk(struct g_raid_subdisk *subdisk, u_int event);
133 static int g_raid_update_volume(struct g_raid_volume *vol, u_int event);
134 static int g_raid_update_node(struct g_raid_softc *sc, u_int event);
135 static void g_raid_dumpconf(struct sbuf *sb, const char *indent,
136 struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp);
137 static void g_raid_start(struct bio *bp);
138 static void g_raid_start_request(struct bio *bp);
139 static void g_raid_disk_done(struct bio *bp);
140 static void g_raid_poll(struct g_raid_softc *sc);
141
142 static const char *
143 g_raid_node_event2str(int event)
144 {
145
146 switch (event) {
147 case G_RAID_NODE_E_WAKE:
148 return ("WAKE");
149 case G_RAID_NODE_E_START:
150 return ("START");
151 default:
152 return ("INVALID");
153 }
154 }
155
156 const char *
157 g_raid_disk_state2str(int state)
158 {
159
160 switch (state) {
161 case G_RAID_DISK_S_NONE:
162 return ("NONE");
163 case G_RAID_DISK_S_OFFLINE:
164 return ("OFFLINE");
165 case G_RAID_DISK_S_DISABLED:
166 return ("DISABLED");
167 case G_RAID_DISK_S_FAILED:
168 return ("FAILED");
169 case G_RAID_DISK_S_STALE_FAILED:
170 return ("STALE_FAILED");
171 case G_RAID_DISK_S_SPARE:
172 return ("SPARE");
173 case G_RAID_DISK_S_STALE:
174 return ("STALE");
175 case G_RAID_DISK_S_ACTIVE:
176 return ("ACTIVE");
177 default:
178 return ("INVALID");
179 }
180 }
181
182 static const char *
183 g_raid_disk_event2str(int event)
184 {
185
186 switch (event) {
187 case G_RAID_DISK_E_DISCONNECTED:
188 return ("DISCONNECTED");
189 default:
190 return ("INVALID");
191 }
192 }
193
194 const char *
195 g_raid_subdisk_state2str(int state)
196 {
197
198 switch (state) {
199 case G_RAID_SUBDISK_S_NONE:
200 return ("NONE");
201 case G_RAID_SUBDISK_S_FAILED:
202 return ("FAILED");
203 case G_RAID_SUBDISK_S_NEW:
204 return ("NEW");
205 case G_RAID_SUBDISK_S_REBUILD:
206 return ("REBUILD");
207 case G_RAID_SUBDISK_S_UNINITIALIZED:
208 return ("UNINITIALIZED");
209 case G_RAID_SUBDISK_S_STALE:
210 return ("STALE");
211 case G_RAID_SUBDISK_S_RESYNC:
212 return ("RESYNC");
213 case G_RAID_SUBDISK_S_ACTIVE:
214 return ("ACTIVE");
215 default:
216 return ("INVALID");
217 }
218 }
219
220 static const char *
221 g_raid_subdisk_event2str(int event)
222 {
223
224 switch (event) {
225 case G_RAID_SUBDISK_E_NEW:
226 return ("NEW");
227 case G_RAID_SUBDISK_E_FAILED:
228 return ("FAILED");
229 case G_RAID_SUBDISK_E_DISCONNECTED:
230 return ("DISCONNECTED");
231 default:
232 return ("INVALID");
233 }
234 }
235
236 const char *
237 g_raid_volume_state2str(int state)
238 {
239
240 switch (state) {
241 case G_RAID_VOLUME_S_STARTING:
242 return ("STARTING");
243 case G_RAID_VOLUME_S_BROKEN:
244 return ("BROKEN");
245 case G_RAID_VOLUME_S_DEGRADED:
246 return ("DEGRADED");
247 case G_RAID_VOLUME_S_SUBOPTIMAL:
248 return ("SUBOPTIMAL");
249 case G_RAID_VOLUME_S_OPTIMAL:
250 return ("OPTIMAL");
251 case G_RAID_VOLUME_S_UNSUPPORTED:
252 return ("UNSUPPORTED");
253 case G_RAID_VOLUME_S_STOPPED:
254 return ("STOPPED");
255 default:
256 return ("INVALID");
257 }
258 }
259
260 static const char *
261 g_raid_volume_event2str(int event)
262 {
263
264 switch (event) {
265 case G_RAID_VOLUME_E_UP:
266 return ("UP");
267 case G_RAID_VOLUME_E_DOWN:
268 return ("DOWN");
269 case G_RAID_VOLUME_E_START:
270 return ("START");
271 case G_RAID_VOLUME_E_STARTMD:
272 return ("STARTMD");
273 default:
274 return ("INVALID");
275 }
276 }
277
278 const char *
279 g_raid_volume_level2str(int level, int qual)
280 {
281
282 switch (level) {
283 case G_RAID_VOLUME_RL_RAID0:
284 return ("RAID0");
285 case G_RAID_VOLUME_RL_RAID1:
286 return ("RAID1");
287 case G_RAID_VOLUME_RL_RAID3:
288 if (qual == G_RAID_VOLUME_RLQ_R3P0)
289 return ("RAID3-P0");
290 if (qual == G_RAID_VOLUME_RLQ_R3PN)
291 return ("RAID3-PN");
292 return ("RAID3");
293 case G_RAID_VOLUME_RL_RAID4:
294 if (qual == G_RAID_VOLUME_RLQ_R4P0)
295 return ("RAID4-P0");
296 if (qual == G_RAID_VOLUME_RLQ_R4PN)
297 return ("RAID4-PN");
298 return ("RAID4");
299 case G_RAID_VOLUME_RL_RAID5:
300 if (qual == G_RAID_VOLUME_RLQ_R5RA)
301 return ("RAID5-RA");
302 if (qual == G_RAID_VOLUME_RLQ_R5RS)
303 return ("RAID5-RS");
304 if (qual == G_RAID_VOLUME_RLQ_R5LA)
305 return ("RAID5-LA");
306 if (qual == G_RAID_VOLUME_RLQ_R5LS)
307 return ("RAID5-LS");
308 return ("RAID5");
309 case G_RAID_VOLUME_RL_RAID6:
310 if (qual == G_RAID_VOLUME_RLQ_R6RA)
311 return ("RAID6-RA");
312 if (qual == G_RAID_VOLUME_RLQ_R6RS)
313 return ("RAID6-RS");
314 if (qual == G_RAID_VOLUME_RLQ_R6LA)
315 return ("RAID6-LA");
316 if (qual == G_RAID_VOLUME_RLQ_R6LS)
317 return ("RAID6-LS");
318 return ("RAID6");
319 case G_RAID_VOLUME_RL_RAIDMDF:
320 if (qual == G_RAID_VOLUME_RLQ_RMDFRA)
321 return ("RAIDMDF-RA");
322 if (qual == G_RAID_VOLUME_RLQ_RMDFRS)
323 return ("RAIDMDF-RS");
324 if (qual == G_RAID_VOLUME_RLQ_RMDFLA)
325 return ("RAIDMDF-LA");
326 if (qual == G_RAID_VOLUME_RLQ_RMDFLS)
327 return ("RAIDMDF-LS");
328 return ("RAIDMDF");
329 case G_RAID_VOLUME_RL_RAID1E:
330 if (qual == G_RAID_VOLUME_RLQ_R1EA)
331 return ("RAID1E-A");
332 if (qual == G_RAID_VOLUME_RLQ_R1EO)
333 return ("RAID1E-O");
334 return ("RAID1E");
335 case G_RAID_VOLUME_RL_SINGLE:
336 return ("SINGLE");
337 case G_RAID_VOLUME_RL_CONCAT:
338 return ("CONCAT");
339 case G_RAID_VOLUME_RL_RAID5E:
340 if (qual == G_RAID_VOLUME_RLQ_R5ERA)
341 return ("RAID5E-RA");
342 if (qual == G_RAID_VOLUME_RLQ_R5ERS)
343 return ("RAID5E-RS");
344 if (qual == G_RAID_VOLUME_RLQ_R5ELA)
345 return ("RAID5E-LA");
346 if (qual == G_RAID_VOLUME_RLQ_R5ELS)
347 return ("RAID5E-LS");
348 return ("RAID5E");
349 case G_RAID_VOLUME_RL_RAID5EE:
350 if (qual == G_RAID_VOLUME_RLQ_R5EERA)
351 return ("RAID5EE-RA");
352 if (qual == G_RAID_VOLUME_RLQ_R5EERS)
353 return ("RAID5EE-RS");
354 if (qual == G_RAID_VOLUME_RLQ_R5EELA)
355 return ("RAID5EE-LA");
356 if (qual == G_RAID_VOLUME_RLQ_R5EELS)
357 return ("RAID5EE-LS");
358 return ("RAID5EE");
359 case G_RAID_VOLUME_RL_RAID5R:
360 if (qual == G_RAID_VOLUME_RLQ_R5RRA)
361 return ("RAID5R-RA");
362 if (qual == G_RAID_VOLUME_RLQ_R5RRS)
363 return ("RAID5R-RS");
364 if (qual == G_RAID_VOLUME_RLQ_R5RLA)
365 return ("RAID5R-LA");
366 if (qual == G_RAID_VOLUME_RLQ_R5RLS)
367 return ("RAID5R-LS");
368 return ("RAID5E");
369 default:
370 return ("UNKNOWN");
371 }
372 }
373
374 int
375 g_raid_volume_str2level(const char *str, int *level, int *qual)
376 {
377
378 *level = G_RAID_VOLUME_RL_UNKNOWN;
379 *qual = G_RAID_VOLUME_RLQ_NONE;
380 if (strcasecmp(str, "RAID0") == 0)
381 *level = G_RAID_VOLUME_RL_RAID0;
382 else if (strcasecmp(str, "RAID1") == 0)
383 *level = G_RAID_VOLUME_RL_RAID1;
384 else if (strcasecmp(str, "RAID3-P0") == 0) {
385 *level = G_RAID_VOLUME_RL_RAID3;
386 *qual = G_RAID_VOLUME_RLQ_R3P0;
387 } else if (strcasecmp(str, "RAID3-PN") == 0 ||
388 strcasecmp(str, "RAID3") == 0) {
389 *level = G_RAID_VOLUME_RL_RAID3;
390 *qual = G_RAID_VOLUME_RLQ_R3PN;
391 } else if (strcasecmp(str, "RAID4-P0") == 0) {
392 *level = G_RAID_VOLUME_RL_RAID4;
393 *qual = G_RAID_VOLUME_RLQ_R4P0;
394 } else if (strcasecmp(str, "RAID4-PN") == 0 ||
395 strcasecmp(str, "RAID4") == 0) {
396 *level = G_RAID_VOLUME_RL_RAID4;
397 *qual = G_RAID_VOLUME_RLQ_R4PN;
398 } else if (strcasecmp(str, "RAID5-RA") == 0) {
399 *level = G_RAID_VOLUME_RL_RAID5;
400 *qual = G_RAID_VOLUME_RLQ_R5RA;
401 } else if (strcasecmp(str, "RAID5-RS") == 0) {
402 *level = G_RAID_VOLUME_RL_RAID5;
403 *qual = G_RAID_VOLUME_RLQ_R5RS;
404 } else if (strcasecmp(str, "RAID5") == 0 ||
405 strcasecmp(str, "RAID5-LA") == 0) {
406 *level = G_RAID_VOLUME_RL_RAID5;
407 *qual = G_RAID_VOLUME_RLQ_R5LA;
408 } else if (strcasecmp(str, "RAID5-LS") == 0) {
409 *level = G_RAID_VOLUME_RL_RAID5;
410 *qual = G_RAID_VOLUME_RLQ_R5LS;
411 } else if (strcasecmp(str, "RAID6-RA") == 0) {
412 *level = G_RAID_VOLUME_RL_RAID6;
413 *qual = G_RAID_VOLUME_RLQ_R6RA;
414 } else if (strcasecmp(str, "RAID6-RS") == 0) {
415 *level = G_RAID_VOLUME_RL_RAID6;
416 *qual = G_RAID_VOLUME_RLQ_R6RS;
417 } else if (strcasecmp(str, "RAID6") == 0 ||
418 strcasecmp(str, "RAID6-LA") == 0) {
419 *level = G_RAID_VOLUME_RL_RAID6;
420 *qual = G_RAID_VOLUME_RLQ_R6LA;
421 } else if (strcasecmp(str, "RAID6-LS") == 0) {
422 *level = G_RAID_VOLUME_RL_RAID6;
423 *qual = G_RAID_VOLUME_RLQ_R6LS;
424 } else if (strcasecmp(str, "RAIDMDF-RA") == 0) {
425 *level = G_RAID_VOLUME_RL_RAIDMDF;
426 *qual = G_RAID_VOLUME_RLQ_RMDFRA;
427 } else if (strcasecmp(str, "RAIDMDF-RS") == 0) {
428 *level = G_RAID_VOLUME_RL_RAIDMDF;
429 *qual = G_RAID_VOLUME_RLQ_RMDFRS;
430 } else if (strcasecmp(str, "RAIDMDF") == 0 ||
431 strcasecmp(str, "RAIDMDF-LA") == 0) {
432 *level = G_RAID_VOLUME_RL_RAIDMDF;
433 *qual = G_RAID_VOLUME_RLQ_RMDFLA;
434 } else if (strcasecmp(str, "RAIDMDF-LS") == 0) {
435 *level = G_RAID_VOLUME_RL_RAIDMDF;
436 *qual = G_RAID_VOLUME_RLQ_RMDFLS;
437 } else if (strcasecmp(str, "RAID10") == 0 ||
438 strcasecmp(str, "RAID1E") == 0 ||
439 strcasecmp(str, "RAID1E-A") == 0) {
440 *level = G_RAID_VOLUME_RL_RAID1E;
441 *qual = G_RAID_VOLUME_RLQ_R1EA;
442 } else if (strcasecmp(str, "RAID1E-O") == 0) {
443 *level = G_RAID_VOLUME_RL_RAID1E;
444 *qual = G_RAID_VOLUME_RLQ_R1EO;
445 } else if (strcasecmp(str, "SINGLE") == 0)
446 *level = G_RAID_VOLUME_RL_SINGLE;
447 else if (strcasecmp(str, "CONCAT") == 0)
448 *level = G_RAID_VOLUME_RL_CONCAT;
449 else if (strcasecmp(str, "RAID5E-RA") == 0) {
450 *level = G_RAID_VOLUME_RL_RAID5E;
451 *qual = G_RAID_VOLUME_RLQ_R5ERA;
452 } else if (strcasecmp(str, "RAID5E-RS") == 0) {
453 *level = G_RAID_VOLUME_RL_RAID5E;
454 *qual = G_RAID_VOLUME_RLQ_R5ERS;
455 } else if (strcasecmp(str, "RAID5E") == 0 ||
456 strcasecmp(str, "RAID5E-LA") == 0) {
457 *level = G_RAID_VOLUME_RL_RAID5E;
458 *qual = G_RAID_VOLUME_RLQ_R5ELA;
459 } else if (strcasecmp(str, "RAID5E-LS") == 0) {
460 *level = G_RAID_VOLUME_RL_RAID5E;
461 *qual = G_RAID_VOLUME_RLQ_R5ELS;
462 } else if (strcasecmp(str, "RAID5EE-RA") == 0) {
463 *level = G_RAID_VOLUME_RL_RAID5EE;
464 *qual = G_RAID_VOLUME_RLQ_R5EERA;
465 } else if (strcasecmp(str, "RAID5EE-RS") == 0) {
466 *level = G_RAID_VOLUME_RL_RAID5EE;
467 *qual = G_RAID_VOLUME_RLQ_R5EERS;
468 } else if (strcasecmp(str, "RAID5EE") == 0 ||
469 strcasecmp(str, "RAID5EE-LA") == 0) {
470 *level = G_RAID_VOLUME_RL_RAID5EE;
471 *qual = G_RAID_VOLUME_RLQ_R5EELA;
472 } else if (strcasecmp(str, "RAID5EE-LS") == 0) {
473 *level = G_RAID_VOLUME_RL_RAID5EE;
474 *qual = G_RAID_VOLUME_RLQ_R5EELS;
475 } else if (strcasecmp(str, "RAID5R-RA") == 0) {
476 *level = G_RAID_VOLUME_RL_RAID5R;
477 *qual = G_RAID_VOLUME_RLQ_R5RRA;
478 } else if (strcasecmp(str, "RAID5R-RS") == 0) {
479 *level = G_RAID_VOLUME_RL_RAID5R;
480 *qual = G_RAID_VOLUME_RLQ_R5RRS;
481 } else if (strcasecmp(str, "RAID5R") == 0 ||
482 strcasecmp(str, "RAID5R-LA") == 0) {
483 *level = G_RAID_VOLUME_RL_RAID5R;
484 *qual = G_RAID_VOLUME_RLQ_R5RLA;
485 } else if (strcasecmp(str, "RAID5R-LS") == 0) {
486 *level = G_RAID_VOLUME_RL_RAID5R;
487 *qual = G_RAID_VOLUME_RLQ_R5RLS;
488 } else
489 return (-1);
490 return (0);
491 }
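/*
 * Illustrative round-trip (a hypothetical snippet, not from this file):
 * for canonical spellings the parser above and g_raid_volume_level2str()
 * are inverses, so a level can be converted back to its string form:
 *
 *	int level, qual;
 *
 *	if (g_raid_volume_str2level("RAID5-LS", &level, &qual) == 0)
 *		printf("%s\n", g_raid_volume_level2str(level, qual));
 *
 * This prints "RAID5-LS".  Aliases are normalized: "RAID10" parses as
 * RAID1E with the -A qualifier and formats back as "RAID1E-A".
 */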
492
493 const char *
494 g_raid_get_diskname(struct g_raid_disk *disk)
495 {
496
497 if (disk->d_consumer == NULL || disk->d_consumer->provider == NULL)
498 return ("[unknown]");
499 return (disk->d_consumer->provider->name);
500 }
501
502 void
503 g_raid_get_disk_info(struct g_raid_disk *disk)
504 {
505 struct g_consumer *cp = disk->d_consumer;
506 int error, len;
507
508 /* Read kernel dumping information. */
509 disk->d_kd.offset = 0;
510 disk->d_kd.length = OFF_MAX;
511 len = sizeof(disk->d_kd);
512 error = g_io_getattr("GEOM::kerneldump", cp, &len, &disk->d_kd);
513 if (error)
514 disk->d_kd.di.dumper = NULL;
515 if (disk->d_kd.di.dumper == NULL)
516 G_RAID_DEBUG1(2, disk->d_softc,
517 "Dumping not supported by %s: %d.",
518 cp->provider->name, error);
519
520 /* Read BIO_DELETE support. */
521 error = g_getattr("GEOM::candelete", cp, &disk->d_candelete);
522 if (error)
523 disk->d_candelete = 0;
524 if (!disk->d_candelete)
525 G_RAID_DEBUG1(2, disk->d_softc,
526 "BIO_DELETE not supported by %s: %d.",
527 cp->provider->name, error);
528 }
529
530 void
531 g_raid_report_disk_state(struct g_raid_disk *disk)
532 {
533 struct g_raid_subdisk *sd;
534 int len, state;
535 uint32_t s;
536
537 if (disk->d_consumer == NULL)
538 return;
539 if (disk->d_state == G_RAID_DISK_S_DISABLED) {
540 s = G_STATE_ACTIVE; /* XXX */
541 } else if (disk->d_state == G_RAID_DISK_S_FAILED ||
542 disk->d_state == G_RAID_DISK_S_STALE_FAILED) {
543 s = G_STATE_FAILED;
544 } else {
545 state = G_RAID_SUBDISK_S_ACTIVE;
546 TAILQ_FOREACH(sd, &disk->d_subdisks, sd_next) {
547 if (sd->sd_state < state)
548 state = sd->sd_state;
549 }
550 if (state == G_RAID_SUBDISK_S_FAILED)
551 s = G_STATE_FAILED;
552 else if (state == G_RAID_SUBDISK_S_NEW ||
553 state == G_RAID_SUBDISK_S_REBUILD)
554 s = G_STATE_REBUILD;
555 else if (state == G_RAID_SUBDISK_S_STALE ||
556 state == G_RAID_SUBDISK_S_RESYNC)
557 s = G_STATE_RESYNC;
558 else
559 s = G_STATE_ACTIVE;
560 }
561 len = sizeof(s);
562 g_io_getattr("GEOM::setstate", disk->d_consumer, &len, &s);
563 G_RAID_DEBUG1(2, disk->d_softc, "Disk %s state reported as %d.",
564 g_raid_get_diskname(disk), s);
565 }
566
567 void
568 g_raid_change_disk_state(struct g_raid_disk *disk, int state)
569 {
570
571 G_RAID_DEBUG1(0, disk->d_softc, "Disk %s state changed from %s to %s.",
572 g_raid_get_diskname(disk),
573 g_raid_disk_state2str(disk->d_state),
574 g_raid_disk_state2str(state));
575 disk->d_state = state;
576 g_raid_report_disk_state(disk);
577 }
578
579 void
580 g_raid_change_subdisk_state(struct g_raid_subdisk *sd, int state)
581 {
582
583 G_RAID_DEBUG1(0, sd->sd_softc,
584 "Subdisk %s:%d-%s state changed from %s to %s.",
585 sd->sd_volume->v_name, sd->sd_pos,
586 sd->sd_disk ? g_raid_get_diskname(sd->sd_disk) : "[none]",
587 g_raid_subdisk_state2str(sd->sd_state),
588 g_raid_subdisk_state2str(state));
589 sd->sd_state = state;
590 if (sd->sd_disk)
591 g_raid_report_disk_state(sd->sd_disk);
592 }
593
594 void
595 g_raid_change_volume_state(struct g_raid_volume *vol, int state)
596 {
597
598 G_RAID_DEBUG1(0, vol->v_softc,
599 "Volume %s state changed from %s to %s.",
600 vol->v_name,
601 g_raid_volume_state2str(vol->v_state),
602 g_raid_volume_state2str(state));
603 vol->v_state = state;
604 }
605
606 /*
607 * --- Events handling functions ---
608 * Events in geom_raid are used to maintain the status of subdisks and
609 * volumes from a single thread, which simplifies locking.
610 */
611 static void
612 g_raid_event_free(struct g_raid_event *ep)
613 {
614
615 free(ep, M_RAID);
616 }
617
618 int
619 g_raid_event_send(void *arg, int event, int flags)
620 {
621 struct g_raid_softc *sc;
622 struct g_raid_event *ep;
623 int error;
624
625 if ((flags & G_RAID_EVENT_VOLUME) != 0) {
626 sc = ((struct g_raid_volume *)arg)->v_softc;
627 } else if ((flags & G_RAID_EVENT_DISK) != 0) {
628 sc = ((struct g_raid_disk *)arg)->d_softc;
629 } else if ((flags & G_RAID_EVENT_SUBDISK) != 0) {
630 sc = ((struct g_raid_subdisk *)arg)->sd_softc;
631 } else {
632 sc = arg;
633 }
634 ep = malloc(sizeof(*ep), M_RAID,
635 sx_xlocked(&sc->sc_lock) ? M_WAITOK : M_NOWAIT);
636 if (ep == NULL)
637 return (ENOMEM);
638 ep->e_tgt = arg;
639 ep->e_event = event;
640 ep->e_flags = flags;
641 ep->e_error = 0;
642 G_RAID_DEBUG1(4, sc, "Sending event %p. Waking up %p.", ep, sc);
643 mtx_lock(&sc->sc_queue_mtx);
644 TAILQ_INSERT_TAIL(&sc->sc_events, ep, e_next);
645 mtx_unlock(&sc->sc_queue_mtx);
646 wakeup(sc);
647
648 if ((flags & G_RAID_EVENT_WAIT) == 0)
649 return (0);
650
651 sx_assert(&sc->sc_lock, SX_XLOCKED);
652 G_RAID_DEBUG1(4, sc, "Sleeping on %p.", ep);
653 sx_xunlock(&sc->sc_lock);
654 while ((ep->e_flags & G_RAID_EVENT_DONE) == 0) {
655 mtx_lock(&sc->sc_queue_mtx);
656 MSLEEP(error, ep, &sc->sc_queue_mtx, PRIBIO | PDROP, "m:event",
657 hz * 5);
658 }
659 error = ep->e_error;
660 g_raid_event_free(ep);
661 sx_xlock(&sc->sc_lock);
662 return (error);
663 }
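/*
 * Usage sketch (a hypothetical caller, not from this file): to get the
 * result synchronously, pass G_RAID_EVENT_WAIT and hold sc_lock
 * exclusively, since the function drops and reacquires it while
 * sleeping on the event:
 *
 *	error = g_raid_event_send(disk, G_RAID_DISK_E_DISCONNECTED,
 *	    G_RAID_EVENT_DISK | G_RAID_EVENT_WAIT);
 *
 * A waited-for event may return ECANCELED if its target is torn down
 * first (see g_raid_event_cancel() below).  Fire-and-forget callers
 * omit G_RAID_EVENT_WAIT and can only fail with ENOMEM when the event
 * is allocated with M_NOWAIT.
 */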
664
665 static void
666 g_raid_event_cancel(struct g_raid_softc *sc, void *tgt)
667 {
668 struct g_raid_event *ep, *tmpep;
669
670 sx_assert(&sc->sc_lock, SX_XLOCKED);
671
672 mtx_lock(&sc->sc_queue_mtx);
673 TAILQ_FOREACH_SAFE(ep, &sc->sc_events, e_next, tmpep) {
674 if (ep->e_tgt != tgt)
675 continue;
676 TAILQ_REMOVE(&sc->sc_events, ep, e_next);
677 if ((ep->e_flags & G_RAID_EVENT_WAIT) == 0)
678 g_raid_event_free(ep);
679 else {
680 ep->e_error = ECANCELED;
681 wakeup(ep);
682 }
683 }
684 mtx_unlock(&sc->sc_queue_mtx);
685 }
686
687 static int
688 g_raid_event_check(struct g_raid_softc *sc, void *tgt)
689 {
690 struct g_raid_event *ep;
691 int res = 0;
692
693 sx_assert(&sc->sc_lock, SX_XLOCKED);
694
695 mtx_lock(&sc->sc_queue_mtx);
696 TAILQ_FOREACH(ep, &sc->sc_events, e_next) {
697 if (ep->e_tgt != tgt)
698 continue;
699 res = 1;
700 break;
701 }
702 mtx_unlock(&sc->sc_queue_mtx);
703 return (res);
704 }
705
706 /*
707 * Return the number of disks in the given state.
708 * If state is equal to -1, count all connected disks.
709 */
710 u_int
711 g_raid_ndisks(struct g_raid_softc *sc, int state)
712 {
713 struct g_raid_disk *disk;
714 u_int n;
715
716 sx_assert(&sc->sc_lock, SX_LOCKED);
717
718 n = 0;
719 TAILQ_FOREACH(disk, &sc->sc_disks, d_next) {
720 if (disk->d_state == state || state == -1)
721 n++;
722 }
723 return (n);
724 }
725
726 /*
727 * Return the number of subdisks in the given state.
728 * If state is equal to -1, count all connected subdisks.
729 */
730 u_int
731 g_raid_nsubdisks(struct g_raid_volume *vol, int state)
732 {
733 struct g_raid_subdisk *subdisk;
734 struct g_raid_softc *sc;
735 u_int i, n;
736
737 sc = vol->v_softc;
738 sx_assert(&sc->sc_lock, SX_LOCKED);
739
740 n = 0;
741 for (i = 0; i < vol->v_disks_count; i++) {
742 subdisk = &vol->v_subdisks[i];
743 if ((state == -1 &&
744 subdisk->sd_state != G_RAID_SUBDISK_S_NONE) ||
745 subdisk->sd_state == state)
746 n++;
747 }
748 return (n);
749 }
750
751 /*
752 * Return the first subdisk in the given state.
753 * If state is equal to -1, return the first connected subdisk.
754 */
755 struct g_raid_subdisk *
756 g_raid_get_subdisk(struct g_raid_volume *vol, int state)
757 {
758 struct g_raid_subdisk *sd;
759 struct g_raid_softc *sc;
760 u_int i;
761
762 sc = vol->v_softc;
763 sx_assert(&sc->sc_lock, SX_LOCKED);
764
765 for (i = 0; i < vol->v_disks_count; i++) {
766 sd = &vol->v_subdisks[i];
767 if ((state == -1 &&
768 sd->sd_state != G_RAID_SUBDISK_S_NONE) ||
769 sd->sd_state == state)
770 return (sd);
771 }
772 return (NULL);
773 }
774
775 struct g_consumer *
776 g_raid_open_consumer(struct g_raid_softc *sc, const char *name)
777 {
778 struct g_consumer *cp;
779 struct g_provider *pp;
780
781 g_topology_assert();
782
783 if (strncmp(name, "/dev/", 5) == 0)
784 name += 5;
785 pp = g_provider_by_name(name);
786 if (pp == NULL)
787 return (NULL);
788 cp = g_new_consumer(sc->sc_geom);
789 if (g_attach(cp, pp) != 0) {
790 g_destroy_consumer(cp);
791 return (NULL);
792 }
793 if (g_access(cp, 1, 1, 1) != 0) {
794 g_detach(cp);
795 g_destroy_consumer(cp);
796 return (NULL);
797 }
798 return (cp);
799 }
800
801 static u_int
802 g_raid_nrequests(struct g_raid_softc *sc, struct g_consumer *cp)
803 {
804 struct bio *bp;
805 u_int nreqs = 0;
806
807 mtx_lock(&sc->sc_queue_mtx);
808 TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
809 if (bp->bio_from == cp)
810 nreqs++;
811 }
812 mtx_unlock(&sc->sc_queue_mtx);
813 return (nreqs);
814 }
815
816 u_int
817 g_raid_nopens(struct g_raid_softc *sc)
818 {
819 struct g_raid_volume *vol;
820 u_int opens;
821
822 opens = 0;
823 TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) {
824 if (vol->v_provider_open != 0)
825 opens++;
826 }
827 return (opens);
828 }
829
830 static int
831 g_raid_consumer_is_busy(struct g_raid_softc *sc, struct g_consumer *cp)
832 {
833
834 if (cp->index > 0) {
835 G_RAID_DEBUG1(2, sc,
836 "I/O requests for %s exist, can't destroy it now.",
837 cp->provider->name);
838 return (1);
839 }
840 if (g_raid_nrequests(sc, cp) > 0) {
841 G_RAID_DEBUG1(2, sc,
842 "I/O requests for %s in queue, can't destroy it now.",
843 cp->provider->name);
844 return (1);
845 }
846 return (0);
847 }
848
849 static void
850 g_raid_destroy_consumer(void *arg, int flags __unused)
851 {
852 struct g_consumer *cp;
853
854 g_topology_assert();
855
856 cp = arg;
857 G_RAID_DEBUG(1, "Consumer %s destroyed.", cp->provider->name);
858 g_detach(cp);
859 g_destroy_consumer(cp);
860 }
861
862 void
863 g_raid_kill_consumer(struct g_raid_softc *sc, struct g_consumer *cp)
864 {
865 struct g_provider *pp;
866 int retaste_wait;
867
868 g_topology_assert_not();
869
870 g_topology_lock();
871 cp->private = NULL;
872 if (g_raid_consumer_is_busy(sc, cp))
873 goto out;
874 pp = cp->provider;
875 retaste_wait = 0;
876 if (cp->acw == 1) {
877 if ((pp->geom->flags & G_GEOM_WITHER) == 0)
878 retaste_wait = 1;
879 }
880 if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
881 g_access(cp, -cp->acr, -cp->acw, -cp->ace);
882 if (retaste_wait) {
883 /*
884 * After the retaste event was sent (inside g_access()), we can
885 * send an event to detach and destroy the consumer.
886 * A class that has a consumer attached to the given provider
887 * will not receive a retaste event for that provider.
888 * This is how retaste events are ignored when closing
889 * consumers opened for write: the consumer is detached and
890 * destroyed after the retaste event is sent.
891 */
892 g_post_event(g_raid_destroy_consumer, cp, M_WAITOK, NULL);
893 goto out;
894 }
895 G_RAID_DEBUG(1, "Consumer %s destroyed.", pp->name);
896 g_detach(cp);
897 g_destroy_consumer(cp);
898 out:
899 g_topology_unlock();
900 }
901
902 static void
903 g_raid_orphan(struct g_consumer *cp)
904 {
905 struct g_raid_disk *disk;
906
907 g_topology_assert();
908
909 disk = cp->private;
910 if (disk == NULL)
911 return;
912 g_raid_event_send(disk, G_RAID_DISK_E_DISCONNECTED,
913 G_RAID_EVENT_DISK);
914 }
915
916 static void
917 g_raid_clean(struct g_raid_volume *vol, int acw)
918 {
919 struct g_raid_softc *sc;
920 int timeout;
921
922 sc = vol->v_softc;
923 g_topology_assert_not();
924 sx_assert(&sc->sc_lock, SX_XLOCKED);
925
926 // if ((sc->sc_flags & G_RAID_DEVICE_FLAG_NOFAILSYNC) != 0)
927 // return;
928 if (!vol->v_dirty)
929 return;
930 if (vol->v_writes > 0)
931 return;
932 if (acw > 0 || (acw == -1 &&
933 vol->v_provider != NULL && vol->v_provider->acw > 0)) {
934 timeout = g_raid_clean_time - (time_uptime - vol->v_last_write);
935 if (!g_raid_shutdown && timeout > 0)
936 return;
937 }
938 vol->v_dirty = 0;
939 G_RAID_DEBUG1(1, sc, "Volume %s marked as clean.",
940 vol->v_name);
941 g_raid_write_metadata(sc, vol, NULL, NULL);
942 }
943
944 static void
945 g_raid_dirty(struct g_raid_volume *vol)
946 {
947 struct g_raid_softc *sc;
948
949 sc = vol->v_softc;
950 g_topology_assert_not();
951 sx_assert(&sc->sc_lock, SX_XLOCKED);
952
953 // if ((sc->sc_flags & G_RAID_DEVICE_FLAG_NOFAILSYNC) != 0)
954 // return;
955 vol->v_dirty = 1;
956 G_RAID_DEBUG1(1, sc, "Volume %s marked as dirty.",
957 vol->v_name);
958 g_raid_write_metadata(sc, vol, NULL, NULL);
959 }
960
961 void
962 g_raid_tr_flush_common(struct g_raid_tr_object *tr, struct bio *bp)
963 {
964 struct g_raid_softc *sc;
965 struct g_raid_volume *vol;
966 struct g_raid_subdisk *sd;
967 struct bio_queue_head queue;
968 struct bio *cbp;
969 int i;
970
971 vol = tr->tro_volume;
972 sc = vol->v_softc;
973
974 /*
975 * Allocate all bios before sending any request, so we can return
976 * ENOMEM in a nice and clean way.
977 */
978 bioq_init(&queue);
979 for (i = 0; i < vol->v_disks_count; i++) {
980 sd = &vol->v_subdisks[i];
981 if (sd->sd_state == G_RAID_SUBDISK_S_NONE ||
982 sd->sd_state == G_RAID_SUBDISK_S_FAILED)
983 continue;
984 cbp = g_clone_bio(bp);
985 if (cbp == NULL)
986 goto failure;
987 cbp->bio_caller1 = sd;
988 bioq_insert_tail(&queue, cbp);
989 }
990 for (cbp = bioq_first(&queue); cbp != NULL;
991 cbp = bioq_first(&queue)) {
992 bioq_remove(&queue, cbp);
993 sd = cbp->bio_caller1;
994 cbp->bio_caller1 = NULL;
995 g_raid_subdisk_iostart(sd, cbp);
996 }
997 return;
998 failure:
999 for (cbp = bioq_first(&queue); cbp != NULL;
1000 cbp = bioq_first(&queue)) {
1001 bioq_remove(&queue, cbp);
1002 g_destroy_bio(cbp);
1003 }
1004 if (bp->bio_error == 0)
1005 bp->bio_error = ENOMEM;
1006 g_raid_iodone(bp, bp->bio_error);
1007 }
1008
1009 static void
1010 g_raid_tr_kerneldump_common_done(struct bio *bp)
1011 {
1012
1013 bp->bio_flags |= BIO_DONE;
1014 }
1015
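/*
 * Kernel dumps run in a context where the scheduler and normal I/O
 * completion are unavailable, so the write issued below cannot sleep.
 * The done callback above merely marks the bio finished;
 * g_raid_tr_kerneldump_common() drives it to completion by busy-polling
 * g_raid_poll() until BIO_DONE appears in bio_flags.
 */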
1016 int
1017 g_raid_tr_kerneldump_common(struct g_raid_tr_object *tr,
1018 void *virtual, vm_offset_t physical, off_t offset, size_t length)
1019 {
1020 struct g_raid_softc *sc;
1021 struct g_raid_volume *vol;
1022 struct bio bp;
1023
1024 vol = tr->tro_volume;
1025 sc = vol->v_softc;
1026
1027 bzero(&bp, sizeof(bp));
1028 bp.bio_cmd = BIO_WRITE;
1029 bp.bio_done = g_raid_tr_kerneldump_common_done;
1030 bp.bio_attribute = NULL;
1031 bp.bio_offset = offset;
1032 bp.bio_length = length;
1033 bp.bio_data = virtual;
1034 bp.bio_to = vol->v_provider;
1035
1036 g_raid_start(&bp);
1037 while (!(bp.bio_flags & BIO_DONE)) {
1038 G_RAID_DEBUG1(4, sc, "Poll...");
1039 g_raid_poll(sc);
1040 DELAY(10);
1041 }
1042
1043 return (bp.bio_error != 0 ? EIO : 0);
1044 }
1045
1046 static int
1047 g_raid_dump(void *arg,
1048 void *virtual, vm_offset_t physical, off_t offset, size_t length)
1049 {
1050 struct g_raid_volume *vol;
1051 int error;
1052
1053 vol = (struct g_raid_volume *)arg;
1054 G_RAID_DEBUG1(3, vol->v_softc, "Dumping at off %llu len %llu.",
1055 (long long unsigned)offset, (long long unsigned)length);
1056
1057 error = G_RAID_TR_KERNELDUMP(vol->v_tr,
1058 virtual, physical, offset, length);
1059 return (error);
1060 }
1061
1062 static void
1063 g_raid_kerneldump(struct g_raid_softc *sc, struct bio *bp)
1064 {
1065 struct g_kerneldump *gkd;
1066 struct g_provider *pp;
1067 struct g_raid_volume *vol;
1068
1069 gkd = (struct g_kerneldump*)bp->bio_data;
1070 pp = bp->bio_to;
1071 vol = pp->private;
1072 g_trace(G_T_TOPOLOGY, "g_raid_kerneldump(%s, %jd, %jd)",
1073 pp->name, (intmax_t)gkd->offset, (intmax_t)gkd->length);
1074 gkd->di.dumper = g_raid_dump;
1075 gkd->di.priv = vol;
1076 gkd->di.blocksize = vol->v_sectorsize;
1077 gkd->di.maxiosize = DFLTPHYS;
1078 gkd->di.mediaoffset = gkd->offset;
1079 if ((gkd->offset + gkd->length) > vol->v_mediasize)
1080 gkd->length = vol->v_mediasize - gkd->offset;
1081 gkd->di.mediasize = gkd->length;
1082 g_io_deliver(bp, 0);
1083 }
1084
1085 static void
1086 g_raid_candelete(struct g_raid_softc *sc, struct bio *bp)
1087 {
1088 struct g_provider *pp;
1089 struct g_raid_volume *vol;
1090 struct g_raid_subdisk *sd;
1091 int *val;
1092 int i;
1093
1094 val = (int *)bp->bio_data;
1095 pp = bp->bio_to;
1096 vol = pp->private;
1097 *val = 0;
1098 for (i = 0; i < vol->v_disks_count; i++) {
1099 sd = &vol->v_subdisks[i];
1100 if (sd->sd_state == G_RAID_SUBDISK_S_NONE)
1101 continue;
1102 if (sd->sd_disk->d_candelete) {
1103 *val = 1;
1104 break;
1105 }
1106 }
1107 g_io_deliver(bp, 0);
1108 }
1109
1110 static void
1111 g_raid_start(struct bio *bp)
1112 {
1113 struct g_raid_softc *sc;
1114
1115 sc = bp->bio_to->geom->softc;
1116 /*
1117 * If sc == NULL or there are no valid disks, the provider's error
1118 * should be set and g_raid_start() should not be called at all.
1119 */
1120 // KASSERT(sc != NULL && sc->sc_state == G_RAID_VOLUME_S_RUNNING,
1121 // ("Provider's error should be set (error=%d)(mirror=%s).",
1122 // bp->bio_to->error, bp->bio_to->name));
1123 G_RAID_LOGREQ(3, bp, "Request received.");
1124
1125 switch (bp->bio_cmd) {
1126 case BIO_READ:
1127 case BIO_WRITE:
1128 case BIO_DELETE:
1129 case BIO_FLUSH:
1130 break;
1131 case BIO_GETATTR:
1132 if (!strcmp(bp->bio_attribute, "GEOM::candelete"))
1133 g_raid_candelete(sc, bp);
1134 else if (!strcmp(bp->bio_attribute, "GEOM::kerneldump"))
1135 g_raid_kerneldump(sc, bp);
1136 else
1137 g_io_deliver(bp, EOPNOTSUPP);
1138 return;
1139 default:
1140 g_io_deliver(bp, EOPNOTSUPP);
1141 return;
1142 }
1143 mtx_lock(&sc->sc_queue_mtx);
1144 bioq_insert_tail(&sc->sc_queue, bp);
1145 mtx_unlock(&sc->sc_queue_mtx);
1146 if (!dumping) {
1147 G_RAID_DEBUG1(4, sc, "Waking up %p.", sc);
1148 wakeup(sc);
1149 }
1150 }
1151
1152 static int
1153 g_raid_bio_overlaps(const struct bio *bp, off_t lstart, off_t len)
1154 {
1155 /*
1156 * 5 cases:
1157 * (1) bp entirely below NO
1158 * (2) bp entirely above NO
1159 * (3) bp start below, but end in range YES
1160 * (4) bp entirely within YES
1161 * (5) bp starts within, ends above YES
1162 *
1163 * lock range 10-19 (offset 10 length 10)
1164 * (1) 1-5: first if kicks it out
1165 * (2) 30-35: second if kicks it out
1166 * (3) 5-15: passes both ifs
1167 * (4) 12-14: passes both ifs
1168 * (5) 19-20: passes both
1169 */
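/*
 * In other words: the closed intervals [bstart, bend] and
 * [lstart, lend] intersect iff bstart <= lend && lstart <= bend;
 * the two early returns below are the negation of that test.
 */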
1170 off_t lend = lstart + len - 1;
1171 off_t bstart = bp->bio_offset;
1172 off_t bend = bp->bio_offset + bp->bio_length - 1;
1173
1174 if (bend < lstart)
1175 return (0);
1176 if (lend < bstart)
1177 return (0);
1178 return (1);
1179 }
1180
1181 static int
1182 g_raid_is_in_locked_range(struct g_raid_volume *vol, const struct bio *bp)
1183 {
1184 struct g_raid_lock *lp;
1185
1186 sx_assert(&vol->v_softc->sc_lock, SX_LOCKED);
1187
1188 LIST_FOREACH(lp, &vol->v_locks, l_next) {
1189 if (g_raid_bio_overlaps(bp, lp->l_offset, lp->l_length))
1190 return (1);
1191 }
1192 return (0);
1193 }
1194
1195 static void
1196 g_raid_start_request(struct bio *bp)
1197 {
1198 struct g_raid_softc *sc;
1199 struct g_raid_volume *vol;
1200
1201 sc = bp->bio_to->geom->softc;
1202 sx_assert(&sc->sc_lock, SX_LOCKED);
1203 vol = bp->bio_to->private;
1204
1205 /*
1206 * Check to see if this item is in a locked range. If so,
1207 * queue it to our locked queue and return. We'll requeue
1208 * it when the range is unlocked. Internal I/O for the
1209 * rebuild/rescan/recovery process is excluded from this
1210 * check so we can actually do the recovery.
1211 */
1212 if (!(bp->bio_cflags & G_RAID_BIO_FLAG_SPECIAL) &&
1213 g_raid_is_in_locked_range(vol, bp)) {
1214 G_RAID_LOGREQ(3, bp, "Defer request.");
1215 bioq_insert_tail(&vol->v_locked, bp);
1216 return;
1217 }
1218
1219 /*
1220 * If we're actually going to do the write/delete, then
1221 * update the idle stats for the volume.
1222 */
1223 if (bp->bio_cmd == BIO_WRITE || bp->bio_cmd == BIO_DELETE) {
1224 if (!vol->v_dirty)
1225 g_raid_dirty(vol);
1226 vol->v_writes++;
1227 }
1228
1229 /*
1230 * Put the request onto the inflight queue, so we can check that new
1231 * synchronization requests do not collide with it. Then tell
1232 * the transformation layer to start the I/O.
1233 */
1234 bioq_insert_tail(&vol->v_inflight, bp);
1235 G_RAID_LOGREQ(4, bp, "Request started.");
1236 G_RAID_TR_IOSTART(vol->v_tr, bp);
1237 }
1238
1239 static void
1240 g_raid_finish_with_locked_ranges(struct g_raid_volume *vol, struct bio *bp)
1241 {
1242 off_t off, len;
1243 struct bio *nbp;
1244 struct g_raid_lock *lp;
1245
1246 vol->v_pending_lock = 0;
1247 LIST_FOREACH(lp, &vol->v_locks, l_next) {
1248 if (lp->l_pending) {
1249 off = lp->l_offset;
1250 len = lp->l_length;
1251 lp->l_pending = 0;
1252 TAILQ_FOREACH(nbp, &vol->v_inflight.queue, bio_queue) {
1253 if (g_raid_bio_overlaps(nbp, off, len))
1254 lp->l_pending++;
1255 }
1256 if (lp->l_pending) {
1257 vol->v_pending_lock = 1;
1258 G_RAID_DEBUG1(4, vol->v_softc,
1259 "Deferred lock(%jd, %jd) has %d pending",
1260 (intmax_t)off, (intmax_t)(off + len),
1261 lp->l_pending);
1262 continue;
1263 }
1264 G_RAID_DEBUG1(4, vol->v_softc,
1265 "Deferred lock of %jd to %jd completed",
1266 (intmax_t)off, (intmax_t)(off + len));
1267 G_RAID_TR_LOCKED(vol->v_tr, lp->l_callback_arg);
1268 }
1269 }
1270 }
1271
1272 void
1273 g_raid_iodone(struct bio *bp, int error)
1274 {
1275 struct g_raid_softc *sc;
1276 struct g_raid_volume *vol;
1277
1278 sc = bp->bio_to->geom->softc;
1279 sx_assert(&sc->sc_lock, SX_LOCKED);
1280 vol = bp->bio_to->private;
1281 G_RAID_LOGREQ(3, bp, "Request done: %d.", error);
1282
1283 /* Update stats if we have done a write/delete. */
1284 if (bp->bio_cmd == BIO_WRITE || bp->bio_cmd == BIO_DELETE) {
1285 vol->v_writes--;
1286 vol->v_last_write = time_uptime;
1287 }
1288
1289 bioq_remove(&vol->v_inflight, bp);
1290 if (vol->v_pending_lock && g_raid_is_in_locked_range(vol, bp))
1291 g_raid_finish_with_locked_ranges(vol, bp);
1292 getmicrouptime(&vol->v_last_done);
1293 g_io_deliver(bp, error);
1294 }
1295
1296 int
1297 g_raid_lock_range(struct g_raid_volume *vol, off_t off, off_t len,
1298 struct bio *ignore, void *argp)
1299 {
1300 struct g_raid_softc *sc;
1301 struct g_raid_lock *lp;
1302 struct bio *bp;
1303
1304 sc = vol->v_softc;
1305 lp = malloc(sizeof(*lp), M_RAID, M_WAITOK | M_ZERO);
1306 LIST_INSERT_HEAD(&vol->v_locks, lp, l_next);
1307 lp->l_offset = off;
1308 lp->l_length = len;
1309 lp->l_callback_arg = argp;
1310
1311 lp->l_pending = 0;
1312 TAILQ_FOREACH(bp, &vol->v_inflight.queue, bio_queue) {
1313 if (bp != ignore && g_raid_bio_overlaps(bp, off, len))
1314 lp->l_pending++;
1315 }
1316
1317 /*
1318 * If there are any writes that are pending, we return EBUSY. All
1319 * callers will have to wait until all pending writes clear.
1320 */
1321 if (lp->l_pending > 0) {
1322 vol->v_pending_lock = 1;
1323 G_RAID_DEBUG1(4, sc, "Locking range %jd to %jd deferred %d pend",
1324 (intmax_t)off, (intmax_t)(off+len), lp->l_pending);
1325 return (EBUSY);
1326 }
1327 G_RAID_DEBUG1(4, sc, "Locking range %jd to %jd",
1328 (intmax_t)off, (intmax_t)(off+len));
1329 G_RAID_TR_LOCKED(vol->v_tr, lp->l_callback_arg);
1330 return (0);
1331 }
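/*
 * Caller sketch (hypothetical, not from this file): a transformation
 * module treats EBUSY as "locked later" and resumes the deferred work
 * in its G_RAID_TR_LOCKED() method, which
 * g_raid_finish_with_locked_ranges() invokes once the overlapping
 * in-flight requests drain:
 *
 *	error = g_raid_lock_range(vol, off, len, NULL, arg);
 *	if (error == 0)
 *		;	// the LOCKED callback has already run
 *	else if (error == EBUSY)
 *		;	// the LOCKED callback will run after I/O drains
 */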
1332
1333 int
1334 g_raid_unlock_range(struct g_raid_volume *vol, off_t off, off_t len)
1335 {
1336 struct g_raid_lock *lp;
1337 struct g_raid_softc *sc;
1338 struct bio *bp;
1339
1340 sc = vol->v_softc;
1341 LIST_FOREACH(lp, &vol->v_locks, l_next) {
1342 if (lp->l_offset == off && lp->l_length == len) {
1343 LIST_REMOVE(lp, l_next);
1344 /* XXX
1345 * Right now we just put them all back on the queue
1346 * and hope for the best. This is safe enough because any
1347 * still-locked ranges will go right back on this list
1348 * when the worker thread runs.
1349 * XXX
1350 */
1351 G_RAID_DEBUG1(4, sc, "Unlocked %jd to %jd",
1352 (intmax_t)lp->l_offset,
1353 (intmax_t)(lp->l_offset+lp->l_length));
1354 mtx_lock(&sc->sc_queue_mtx);
1355 while ((bp = bioq_takefirst(&vol->v_locked)) != NULL)
1356 bioq_insert_tail(&sc->sc_queue, bp);
1357 mtx_unlock(&sc->sc_queue_mtx);
1358 free(lp, M_RAID);
1359 return (0);
1360 }
1361 }
1362 return (EINVAL);
1363 }
1364
1365 void
1366 g_raid_subdisk_iostart(struct g_raid_subdisk *sd, struct bio *bp)
1367 {
1368 struct g_consumer *cp;
1369 struct g_raid_disk *disk, *tdisk;
1370
1371 bp->bio_caller1 = sd;
1372
1373 /*
1374 * Make sure that the disk is present. Generally it is the task of
1375 * transformation layers not to send requests to absent disks, but
1376 * it is better to be safe and report the situation than be sorry.
1377 */
1378 if (sd->sd_disk == NULL) {
1379 G_RAID_LOGREQ(0, bp, "Warning! I/O request to an absent disk!");
1380 nodisk:
1381 bp->bio_from = NULL;
1382 bp->bio_to = NULL;
1383 bp->bio_error = ENXIO;
1384 g_raid_disk_done(bp);
1385 return;
1386 }
1387 disk = sd->sd_disk;
1388 if (disk->d_state != G_RAID_DISK_S_ACTIVE &&
1389 disk->d_state != G_RAID_DISK_S_FAILED) {
1390 G_RAID_LOGREQ(0, bp, "Warning! I/O request to a disk in a "
1391 "wrong state (%s)!", g_raid_disk_state2str(disk->d_state));
1392 goto nodisk;
1393 }
1394
1395 cp = disk->d_consumer;
1396 bp->bio_from = cp;
1397 bp->bio_to = cp->provider;
1398 cp->index++;
1399
1400 /* Update average disks load. */
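/*
 * d_load is an exponentially weighted moving average:
 * new = (1 * current + 7 * previous) / 8, where "current" is the
 * number of requests outstanding on the consumer (consumer->index)
 * scaled by G_RAID_SUBDISK_LOAD_SCALE.  Read-balancing transformation
 * modules can consult d_load to prefer the least busy disk.
 */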
1401 TAILQ_FOREACH(tdisk, &sd->sd_softc->sc_disks, d_next) {
1402 if (tdisk->d_consumer == NULL)
1403 tdisk->d_load = 0;
1404 else
1405 tdisk->d_load = (tdisk->d_consumer->index *
1406 G_RAID_SUBDISK_LOAD_SCALE + tdisk->d_load * 7) / 8;
1407 }
1408
1409 disk->d_last_offset = bp->bio_offset + bp->bio_length;
1410 if (dumping) {
1411 G_RAID_LOGREQ(3, bp, "Sending dumping request.");
1412 if (bp->bio_cmd == BIO_WRITE) {
1413 bp->bio_error = g_raid_subdisk_kerneldump(sd,
1414 bp->bio_data, 0, bp->bio_offset, bp->bio_length);
1415 } else
1416 bp->bio_error = EOPNOTSUPP;
1417 g_raid_disk_done(bp);
1418 } else {
1419 bp->bio_done = g_raid_disk_done;
1420 bp->bio_offset += sd->sd_offset;
1421 G_RAID_LOGREQ(3, bp, "Sending request.");
1422 g_io_request(bp, cp);
1423 }
1424 }
1425
1426 int
1427 g_raid_subdisk_kerneldump(struct g_raid_subdisk *sd,
1428 void *virtual, vm_offset_t physical, off_t offset, size_t length)
1429 {
1430
1431 if (sd->sd_disk == NULL)
1432 return (ENXIO);
1433 if (sd->sd_disk->d_kd.di.dumper == NULL)
1434 return (EOPNOTSUPP);
1435 return (dump_write(&sd->sd_disk->d_kd.di,
1436 virtual, physical,
1437 sd->sd_disk->d_kd.di.mediaoffset + sd->sd_offset + offset,
1438 length));
1439 }
1440
1441 static void
1442 g_raid_disk_done(struct bio *bp)
1443 {
1444 struct g_raid_softc *sc;
1445 struct g_raid_subdisk *sd;
1446
1447 sd = bp->bio_caller1;
1448 sc = sd->sd_softc;
1449 mtx_lock(&sc->sc_queue_mtx);
1450 bioq_insert_tail(&sc->sc_queue, bp);
1451 mtx_unlock(&sc->sc_queue_mtx);
1452 if (!dumping)
1453 wakeup(sc);
1454 }
1455
1456 static void
1457 g_raid_disk_done_request(struct bio *bp)
1458 {
1459 struct g_raid_softc *sc;
1460 struct g_raid_disk *disk;
1461 struct g_raid_subdisk *sd;
1462 struct g_raid_volume *vol;
1463
1464 g_topology_assert_not();
1465
1466 G_RAID_LOGREQ(3, bp, "Disk request done: %d.", bp->bio_error);
1467 sd = bp->bio_caller1;
1468 sc = sd->sd_softc;
1469 vol = sd->sd_volume;
1470 if (bp->bio_from != NULL) {
1471 bp->bio_from->index--;
1472 disk = bp->bio_from->private;
1473 if (disk == NULL)
1474 g_raid_kill_consumer(sc, bp->bio_from);
1475 }
1476 bp->bio_offset -= sd->sd_offset;
1477
1478 G_RAID_TR_IODONE(vol->v_tr, sd, bp);
1479 }
1480
1481 static void
1482 g_raid_handle_event(struct g_raid_softc *sc, struct g_raid_event *ep)
1483 {
1484
1485 if ((ep->e_flags & G_RAID_EVENT_VOLUME) != 0)
1486 ep->e_error = g_raid_update_volume(ep->e_tgt, ep->e_event);
1487 else if ((ep->e_flags & G_RAID_EVENT_DISK) != 0)
1488 ep->e_error = g_raid_update_disk(ep->e_tgt, ep->e_event);
1489 else if ((ep->e_flags & G_RAID_EVENT_SUBDISK) != 0)
1490 ep->e_error = g_raid_update_subdisk(ep->e_tgt, ep->e_event);
1491 else
1492 ep->e_error = g_raid_update_node(ep->e_tgt, ep->e_event);
1493 if ((ep->e_flags & G_RAID_EVENT_WAIT) == 0) {
1494 KASSERT(ep->e_error == 0,
1495 ("Error cannot be handled."));
1496 g_raid_event_free(ep);
1497 } else {
1498 ep->e_flags |= G_RAID_EVENT_DONE;
1499 G_RAID_DEBUG1(4, sc, "Waking up %p.", ep);
1500 mtx_lock(&sc->sc_queue_mtx);
1501 wakeup(ep);
1502 mtx_unlock(&sc->sc_queue_mtx);
1503 }
1504 }
1505
1506 /*
1507 * Worker thread.
1508 */
1509 static void
1510 g_raid_worker(void *arg)
1511 {
1512 struct g_raid_softc *sc;
1513 struct g_raid_event *ep;
1514 struct g_raid_volume *vol;
1515 struct bio *bp;
1516 struct timeval now, t;
1517 int timeout, rv;
1518
1519 sc = arg;
1520 thread_lock(curthread);
1521 sched_prio(curthread, PRIBIO);
1522 thread_unlock(curthread);
1523
1524 sx_xlock(&sc->sc_lock);
1525 for (;;) {
1526 mtx_lock(&sc->sc_queue_mtx);
1527 /*
1528 * First take a look at events.
1529 * This is important to handle events before any I/O requests.
1530 */
1531 bp = NULL;
1532 vol = NULL;
1533 rv = 0;
1534 ep = TAILQ_FIRST(&sc->sc_events);
1535 if (ep != NULL)
1536 TAILQ_REMOVE(&sc->sc_events, ep, e_next);
1537 else if ((bp = bioq_takefirst(&sc->sc_queue)) != NULL)
1538 ;
1539 else {
1540 getmicrouptime(&now);
1541 t = now;
1542 TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) {
1543 if (bioq_first(&vol->v_inflight) == NULL &&
1544 vol->v_tr &&
1545 timevalcmp(&vol->v_last_done, &t, < ))
1546 t = vol->v_last_done;
1547 }
1548 timevalsub(&t, &now);
1549 timeout = g_raid_idle_threshold +
1550 t.tv_sec * 1000000 + t.tv_usec;
1551 if (timeout > 0) {
1552 /*
1553 * Two steps to avoid overflows at HZ=1000
1554 * and idle timeouts > 2.1s. Some rounding
1555 * errors can occur, but they are < 1 tick,
1556 * which is deemed to be close enough for
1557 * this purpose.
1558 */
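/*
 * For example, with hz = 1000 the one-step conversion
 * "timeout * hz / 1000000" would overflow a 32-bit int once
 * timeout exceeds ~2.1s (2100000 * 1000 > INT_MAX), so divide
 * by microseconds-per-tick first and round the tick count up.
 */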
1559 int micpertic = 1000000 / hz;
1560 timeout = (timeout + micpertic - 1) / micpertic;
1561 sx_xunlock(&sc->sc_lock);
1562 MSLEEP(rv, sc, &sc->sc_queue_mtx,
1563 PRIBIO | PDROP, "-", timeout);
1564 sx_xlock(&sc->sc_lock);
1565 goto process;
1566 } else
1567 rv = EWOULDBLOCK;
1568 }
1569 mtx_unlock(&sc->sc_queue_mtx);
1570 process:
1571 if (ep != NULL) {
1572 g_raid_handle_event(sc, ep);
1573 } else if (bp != NULL) {
1574 if (bp->bio_to != NULL &&
1575 bp->bio_to->geom == sc->sc_geom)
1576 g_raid_start_request(bp);
1577 else
1578 g_raid_disk_done_request(bp);
1579 } else if (rv == EWOULDBLOCK) {
1580 TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) {
1581 g_raid_clean(vol, -1);
1582 if (bioq_first(&vol->v_inflight) == NULL &&
1583 vol->v_tr) {
1584 t.tv_sec = g_raid_idle_threshold / 1000000;
1585 t.tv_usec = g_raid_idle_threshold % 1000000;
1586 timevaladd(&t, &vol->v_last_done);
1587 getmicrouptime(&now);
1588 if (timevalcmp(&t, &now, <= )) {
1589 G_RAID_TR_IDLE(vol->v_tr);
1590 vol->v_last_done = now;
1591 }
1592 }
1593 }
1594 }
1595 if (sc->sc_stopping == G_RAID_DESTROY_HARD)
1596 g_raid_destroy_node(sc, 1); /* May not return. */
1597 }
1598 }
1599
1600 static void
1601 g_raid_poll(struct g_raid_softc *sc)
1602 {
1603 struct g_raid_event *ep;
1604 struct bio *bp;
1605
1606 sx_xlock(&sc->sc_lock);
1607 mtx_lock(&sc->sc_queue_mtx);
1608 /*
1609 * First take a look at events.
1610 * This is important to handle events before any I/O requests.
1611 */
1612 ep = TAILQ_FIRST(&sc->sc_events);
1613 if (ep != NULL) {
1614 TAILQ_REMOVE(&sc->sc_events, ep, e_next);
1615 mtx_unlock(&sc->sc_queue_mtx);
1616 g_raid_handle_event(sc, ep);
1617 goto out;
1618 }
1619 bp = bioq_takefirst(&sc->sc_queue);
1620 if (bp != NULL) {
1621 mtx_unlock(&sc->sc_queue_mtx);
1622 if (bp->bio_from == NULL ||
1623 bp->bio_from->geom != sc->sc_geom)
1624 g_raid_start_request(bp);
1625 else
1626 g_raid_disk_done_request(bp);
1627 }
1628 out:
1629 sx_xunlock(&sc->sc_lock);
1630 }
1631
1632 static void
1633 g_raid_launch_provider(struct g_raid_volume *vol)
1634 {
1635 struct g_raid_disk *disk;
1636 struct g_raid_softc *sc;
1637 struct g_provider *pp;
1638 char name[G_RAID_MAX_VOLUMENAME];
1639 off_t off;
1640
1641 sc = vol->v_softc;
1642 sx_assert(&sc->sc_lock, SX_LOCKED);
1643
1644 g_topology_lock();
1645 /* Try to name provider with volume name. */
1646 snprintf(name, sizeof(name), "raid/%s", vol->v_name);
1647 if (g_raid_name_format == 0 || vol->v_name[0] == 0 ||
1648 g_provider_by_name(name) != NULL) {
1649 /* Otherwise use sequential volume number. */
1650 snprintf(name, sizeof(name), "raid/r%d", vol->v_global_id);
1651 }
1652 pp = g_new_providerf(sc->sc_geom, "%s", name);
1653 pp->private = vol;
1654 pp->mediasize = vol->v_mediasize;
1655 pp->sectorsize = vol->v_sectorsize;
1656 pp->stripesize = 0;
1657 pp->stripeoffset = 0;
1658 if (vol->v_raid_level == G_RAID_VOLUME_RL_RAID1 ||
1659 vol->v_raid_level == G_RAID_VOLUME_RL_RAID3 ||
1660 vol->v_raid_level == G_RAID_VOLUME_RL_SINGLE ||
1661 vol->v_raid_level == G_RAID_VOLUME_RL_CONCAT) {
1662 if ((disk = vol->v_subdisks[0].sd_disk) != NULL &&
1663 disk->d_consumer != NULL &&
1664 disk->d_consumer->provider != NULL) {
1665 pp->stripesize = disk->d_consumer->provider->stripesize;
1666 off = disk->d_consumer->provider->stripeoffset;
1667 pp->stripeoffset = off + vol->v_subdisks[0].sd_offset;
1668 if (off > 0)
1669 pp->stripeoffset %= off;
1670 }
1671 if (vol->v_raid_level == G_RAID_VOLUME_RL_RAID3) {
1672 pp->stripesize *= (vol->v_disks_count - 1);
1673 pp->stripeoffset *= (vol->v_disks_count - 1);
1674 }
1675 } else
1676 pp->stripesize = vol->v_strip_size;
1677 vol->v_provider = pp;
1678 g_error_provider(pp, 0);
1679 g_topology_unlock();
1680 G_RAID_DEBUG1(0, sc, "Provider %s for volume %s created.",
1681 pp->name, vol->v_name);
1682 }
1683
1684 static void
1685 g_raid_destroy_provider(struct g_raid_volume *vol)
1686 {
1687 struct g_raid_softc *sc;
1688 struct g_provider *pp;
1689 struct bio *bp, *tmp;
1690
1691 g_topology_assert_not();
1692 sc = vol->v_softc;
1693 pp = vol->v_provider;
1694 KASSERT(pp != NULL, ("NULL provider (volume=%s).", vol->v_name));
1695
1696 g_topology_lock();
1697 g_error_provider(pp, ENXIO);
1698 mtx_lock(&sc->sc_queue_mtx);
1699 TAILQ_FOREACH_SAFE(bp, &sc->sc_queue.queue, bio_queue, tmp) {
1700 if (bp->bio_to != pp)
1701 continue;
1702 bioq_remove(&sc->sc_queue, bp);
1703 g_io_deliver(bp, ENXIO);
1704 }
1705 mtx_unlock(&sc->sc_queue_mtx);
1706 G_RAID_DEBUG1(0, sc, "Provider %s for volume %s destroyed.",
1707 pp->name, vol->v_name);
1708 g_wither_provider(pp, ENXIO);
1709 g_topology_unlock();
1710 vol->v_provider = NULL;
1711 }
1712
1713 /*
1714 * Update volume state.
1715 */
1716 static int
1717 g_raid_update_volume(struct g_raid_volume *vol, u_int event)
1718 {
1719 struct g_raid_softc *sc;
1720
1721 sc = vol->v_softc;
1722 sx_assert(&sc->sc_lock, SX_XLOCKED);
1723
1724 G_RAID_DEBUG1(2, sc, "Event %s for volume %s.",
1725 g_raid_volume_event2str(event),
1726 vol->v_name);
1727 switch (event) {
1728 case G_RAID_VOLUME_E_DOWN:
1729 if (vol->v_provider != NULL)
1730 g_raid_destroy_provider(vol);
1731 break;
1732 case G_RAID_VOLUME_E_UP:
1733 if (vol->v_provider == NULL)
1734 g_raid_launch_provider(vol);
1735 break;
1736 case G_RAID_VOLUME_E_START:
1737 if (vol->v_tr)
1738 G_RAID_TR_START(vol->v_tr);
1739 return (0);
1740 default:
1741 if (sc->sc_md)
1742 G_RAID_MD_VOLUME_EVENT(sc->sc_md, vol, event);
1743 return (0);
1744 }
1745
1746 /* Manage root mount release. */
1747 if (vol->v_starting) {
1748 vol->v_starting = 0;
1749 G_RAID_DEBUG1(1, sc, "root_mount_rel %p", vol->v_rootmount);
1750 root_mount_rel(vol->v_rootmount);
1751 vol->v_rootmount = NULL;
1752 }
1753 if (vol->v_stopping && vol->v_provider_open == 0)
1754 g_raid_destroy_volume(vol);
1755 return (0);
1756 }
1757
1758 /*
1759 * Update subdisk state.
1760 */
1761 static int
1762 g_raid_update_subdisk(struct g_raid_subdisk *sd, u_int event)
1763 {
1764 struct g_raid_softc *sc;
1765 struct g_raid_volume *vol;
1766
1767 sc = sd->sd_softc;
1768 vol = sd->sd_volume;
1769 sx_assert(&sc->sc_lock, SX_XLOCKED);
1770
1771 G_RAID_DEBUG1(2, sc, "Event %s for subdisk %s:%d-%s.",
1772 g_raid_subdisk_event2str(event),
1773 vol->v_name, sd->sd_pos,
1774 sd->sd_disk ? g_raid_get_diskname(sd->sd_disk) : "[none]");
1775 if (vol->v_tr)
1776 G_RAID_TR_EVENT(vol->v_tr, sd, event);
1777
1778 return (0);
1779 }
1780
1781 /*
1782 * Update disk state.
1783 */
1784 static int
1785 g_raid_update_disk(struct g_raid_disk *disk, u_int event)
1786 {
1787 struct g_raid_softc *sc;
1788
1789 sc = disk->d_softc;
1790 sx_assert(&sc->sc_lock, SX_XLOCKED);
1791
1792 G_RAID_DEBUG1(2, sc, "Event %s for disk %s.",
1793 g_raid_disk_event2str(event),
1794 g_raid_get_diskname(disk));
1795
1796 if (sc->sc_md)
1797 G_RAID_MD_EVENT(sc->sc_md, disk, event);
1798 return (0);
1799 }
1800
1801 /*
1802 * Node event.
1803 */
1804 static int
1805 g_raid_update_node(struct g_raid_softc *sc, u_int event)
1806 {
1807 sx_assert(&sc->sc_lock, SX_XLOCKED);
1808
1809 G_RAID_DEBUG1(2, sc, "Event %s for the array.",
1810 g_raid_node_event2str(event));
1811
1812 if (event == G_RAID_NODE_E_WAKE)
1813 return (0);
1814 if (sc->sc_md)
1815 G_RAID_MD_EVENT(sc->sc_md, NULL, event);
1816 return (0);
1817 }
1818
1819 static int
1820 g_raid_access(struct g_provider *pp, int acr, int acw, int ace)
1821 {
1822 struct g_raid_volume *vol;
1823 struct g_raid_softc *sc;
1824 int dcw, opens, error = 0;
1825
1826 g_topology_assert();
1827 sc = pp->geom->softc;
1828 vol = pp->private;
1829 KASSERT(sc != NULL, ("NULL softc (provider=%s).", pp->name));
1830 KASSERT(vol != NULL, ("NULL volume (provider=%s).", pp->name));
1831
1832 G_RAID_DEBUG1(2, sc, "Access request for %s: r%dw%de%d.", pp->name,
1833 acr, acw, ace);
1834 dcw = pp->acw + acw;
1835
1836 g_topology_unlock();
1837 sx_xlock(&sc->sc_lock);
1838 /* Deny new opens while dying. */
1839 if (sc->sc_stopping != 0 && (acr > 0 || acw > 0 || ace > 0)) {
1840 error = ENXIO;
1841 goto out;
1842 }
1843 if (dcw == 0)
1844 g_raid_clean(vol, dcw);
1845 vol->v_provider_open += acr + acw + ace;
1846 /* Handle delayed node destruction. */
1847 if (sc->sc_stopping == G_RAID_DESTROY_DELAYED &&
1848 vol->v_provider_open == 0) {
1849 /* Count open volumes. */
1850 opens = g_raid_nopens(sc);
1851 if (opens == 0) {
1852 sc->sc_stopping = G_RAID_DESTROY_HARD;
1853 /* Wake up worker to make it selfdestruct. */
1854 g_raid_event_send(sc, G_RAID_NODE_E_WAKE, 0);
1855 }
1856 }
1857 /* Handle open volume destruction. */
1858 if (vol->v_stopping && vol->v_provider_open == 0)
1859 g_raid_destroy_volume(vol);
1860 out:
1861 sx_xunlock(&sc->sc_lock);
1862 g_topology_lock();
1863 return (error);
1864 }
1865
1866 struct g_raid_softc *
1867 g_raid_create_node(struct g_class *mp,
1868 const char *name, struct g_raid_md_object *md)
1869 {
1870 struct g_raid_softc *sc;
1871 struct g_geom *gp;
1872 int error;
1873
1874 g_topology_assert();
1875 G_RAID_DEBUG(1, "Creating array %s.", name);
1876
1877 gp = g_new_geomf(mp, "%s", name);
1878 sc = malloc(sizeof(*sc), M_RAID, M_WAITOK | M_ZERO);
1879 gp->start = g_raid_start;
1880 gp->orphan = g_raid_orphan;
1881 gp->access = g_raid_access;
1882 gp->dumpconf = g_raid_dumpconf;
1883
1884 sc->sc_md = md;
1885 sc->sc_geom = gp;
1886 sc->sc_flags = 0;
1887 TAILQ_INIT(&sc->sc_volumes);
1888 TAILQ_INIT(&sc->sc_disks);
1889 sx_init(&sc->sc_lock, "graid:lock");
1890 mtx_init(&sc->sc_queue_mtx, "graid:queue", NULL, MTX_DEF);
1891 TAILQ_INIT(&sc->sc_events);
1892 bioq_init(&sc->sc_queue);
1893 gp->softc = sc;
1894 error = kproc_create(g_raid_worker, sc, &sc->sc_worker, 0, 0,
1895 "g_raid %s", name);
1896 if (error != 0) {
1897 G_RAID_DEBUG(0, "Cannot create kernel thread for %s.", name);
1898 mtx_destroy(&sc->sc_queue_mtx);
1899 sx_destroy(&sc->sc_lock);
1900 g_destroy_geom(sc->sc_geom);
1901 free(sc, M_RAID);
1902 return (NULL);
1903 }
1904
1905 G_RAID_DEBUG1(0, sc, "Array %s created.", name);
1906 return (sc);
1907 }
1908
1909 struct g_raid_volume *
1910 g_raid_create_volume(struct g_raid_softc *sc, const char *name, int id)
1911 {
1912 struct g_raid_volume *vol, *vol1;
1913 int i;
1914
1915 G_RAID_DEBUG1(1, sc, "Creating volume %s.", name);
1916 vol = malloc(sizeof(*vol), M_RAID, M_WAITOK | M_ZERO);
1917 vol->v_softc = sc;
1918 strlcpy(vol->v_name, name, G_RAID_MAX_VOLUMENAME);
1919 vol->v_state = G_RAID_VOLUME_S_STARTING;
1920 vol->v_raid_level = G_RAID_VOLUME_RL_UNKNOWN;
1921 vol->v_raid_level_qualifier = G_RAID_VOLUME_RLQ_UNKNOWN;
1922 vol->v_rotate_parity = 1;
1923 bioq_init(&vol->v_inflight);
1924 bioq_init(&vol->v_locked);
1925 LIST_INIT(&vol->v_locks);
1926 for (i = 0; i < G_RAID_MAX_SUBDISKS; i++) {
1927 vol->v_subdisks[i].sd_softc = sc;
1928 vol->v_subdisks[i].sd_volume = vol;
1929 vol->v_subdisks[i].sd_pos = i;
1930 vol->v_subdisks[i].sd_state = G_RAID_SUBDISK_S_NONE;
1931 }
1932
1933 /* Find free ID for this volume. */
1934 g_topology_lock();
1935 vol1 = vol;
1936 if (id >= 0) {
1937 LIST_FOREACH(vol1, &g_raid_volumes, v_global_next) {
1938 if (vol1->v_global_id == id)
1939 break;
1940 }
1941 }
1942 if (vol1 != NULL) {
1943 for (id = 0; ; id++) {
1944 LIST_FOREACH(vol1, &g_raid_volumes, v_global_next) {
1945 if (vol1->v_global_id == id)
1946 break;
1947 }
1948 if (vol1 == NULL)
1949 break;
1950 }
1951 }
1952 vol->v_global_id = id;
1953 LIST_INSERT_HEAD(&g_raid_volumes, vol, v_global_next);
1954 g_topology_unlock();
1955
1956 /* Delay root mounting. */
1957 vol->v_rootmount = root_mount_hold("GRAID");
1958 G_RAID_DEBUG1(1, sc, "root_mount_hold %p", vol->v_rootmount);
1959 vol->v_starting = 1;
1960 TAILQ_INSERT_TAIL(&sc->sc_volumes, vol, v_next);
1961 return (vol);
1962 }
1963
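/*
 * Allocate a new disk descriptor in the NONE state and link it onto
 * the array's disk list.  The caller (normally a metadata module) is
 * expected to attach a consumer and promote the disk state afterwards.
 */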
1964 struct g_raid_disk *
1965 g_raid_create_disk(struct g_raid_softc *sc)
1966 {
1967 struct g_raid_disk *disk;
1968
1969 G_RAID_DEBUG1(1, sc, "Creating disk.");
1970 disk = malloc(sizeof(*disk), M_RAID, M_WAITOK | M_ZERO);
1971 disk->d_softc = sc;
1972 disk->d_state = G_RAID_DISK_S_NONE;
1973 TAILQ_INIT(&disk->d_subdisks);
1974 TAILQ_INSERT_TAIL(&sc->sc_disks, disk, d_next);
1975 return (disk);
1976 }
1977
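/*
 * Choose a transformation (RAID level) module for the volume: every
 * registered and enabled TR class tastes the volume in priority order
 * and the first one that does not fail becomes vol->v_tr.  If none
 * accepts it, the volume is marked UNSUPPORTED, a DOWN event is posted
 * and -1 is returned.
 */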
1978 int
g_raid_start_volume(struct g_raid_volume *vol)
1979 {
1980 struct g_raid_tr_class *class;
1981 struct g_raid_tr_object *obj;
1982 int status;
1983
1984 G_RAID_DEBUG1(2, vol->v_softc, "Starting volume %s.", vol->v_name);
1985 LIST_FOREACH(class, &g_raid_tr_classes, trc_list) {
1986 if (!class->trc_enable)
1987 continue;
1988 G_RAID_DEBUG1(2, vol->v_softc,
1989 "Tasting volume %s for %s transformation.",
1990 vol->v_name, class->name);
1991 obj = (void *)kobj_create((kobj_class_t)class, M_RAID,
1992 M_WAITOK);
1993 obj->tro_class = class;
1994 obj->tro_volume = vol;
1995 status = G_RAID_TR_TASTE(obj, vol);
1996 if (status != G_RAID_TR_TASTE_FAIL)
1997 break;
1998 kobj_delete((kobj_t)obj, M_RAID);
1999 }
2000 if (class == NULL) {
2001 G_RAID_DEBUG1(0, vol->v_softc,
2002 "No transformation module found for %s.",
2003 vol->v_name);
2004 vol->v_tr = NULL;
2005 g_raid_change_volume_state(vol, G_RAID_VOLUME_S_UNSUPPORTED);
2006 g_raid_event_send(vol, G_RAID_VOLUME_E_DOWN,
2007 G_RAID_EVENT_VOLUME);
2008 return (-1);
2009 }
2010 G_RAID_DEBUG1(2, vol->v_softc,
2011 "Transformation module %s chosen for %s.",
2012 class->name, vol->v_name);
2013 vol->v_tr = obj;
2014 return (0);
2015 }
2016
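/*
 * Tear down the whole array node: destroy all volumes and disks, free
 * the metadata object and wither the geom.  Returns EBUSY if a volume
 * or disk cannot be destroyed yet.  With 'worker' non-zero (called from
 * the worker thread itself) the softc is freed and the thread exits via
 * kproc_exit(); otherwise the worker is woken to finish the job.
 */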
2017 int
2018 g_raid_destroy_node(struct g_raid_softc *sc, int worker)
2019 {
2020 struct g_raid_volume *vol, *tmpv;
2021 struct g_raid_disk *disk, *tmpd;
2022 int error = 0;
2023
2024 sc->sc_stopping = G_RAID_DESTROY_HARD;
2025 TAILQ_FOREACH_SAFE(vol, &sc->sc_volumes, v_next, tmpv) {
2026 if (g_raid_destroy_volume(vol))
2027 error = EBUSY;
2028 }
2029 if (error)
2030 return (error);
2031 TAILQ_FOREACH_SAFE(disk, &sc->sc_disks, d_next, tmpd) {
2032 if (g_raid_destroy_disk(disk))
2033 error = EBUSY;
2034 }
2035 if (error)
2036 return (error);
2037 if (sc->sc_md) {
2038 G_RAID_MD_FREE(sc->sc_md);
2039 kobj_delete((kobj_t)sc->sc_md, M_RAID);
2040 sc->sc_md = NULL;
2041 }
2042 if (sc->sc_geom != NULL) {
2043 G_RAID_DEBUG1(0, sc, "Array %s destroyed.", sc->sc_name);
2044 g_topology_lock();
2045 sc->sc_geom->softc = NULL;
2046 g_wither_geom(sc->sc_geom, ENXIO);
2047 g_topology_unlock();
2048 sc->sc_geom = NULL;
2049 } else
2050 G_RAID_DEBUG(1, "Array destroyed.");
2051 if (worker) {
2052 g_raid_event_cancel(sc, sc);
2053 mtx_destroy(&sc->sc_queue_mtx);
2054 sx_xunlock(&sc->sc_lock);
2055 sx_destroy(&sc->sc_lock);
2056 wakeup(&sc->sc_stopping);
2057 free(sc, M_RAID);
2058 curthread->td_pflags &= ~TDP_GEOM;
2059 G_RAID_DEBUG(1, "Thread exiting.");
2060 kproc_exit(0);
2061 } else {
2062 /* Wake up worker to make it self-destruct. */
2063 g_raid_event_send(sc, G_RAID_NODE_E_WAKE, 0);
2064 }
2065 return (0);
2066 }
2067
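/*
 * Destroy a single volume.  If it is not yet STOPPED, its TR module is
 * asked to stop and EBUSY is returned so the caller retries; the same
 * happens while events, a provider or opens are still outstanding.
 * Once clear, the TR object, subdisk links and the volume itself are
 * freed, and the worker is woken if a hard node destruction is pending.
 */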
2068 int
2069 g_raid_destroy_volume(struct g_raid_volume *vol)
2070 {
2071 struct g_raid_softc *sc;
2072 struct g_raid_disk *disk;
2073 int i;
2074
2075 sc = vol->v_softc;
2076 G_RAID_DEBUG1(2, sc, "Destroying volume %s.", vol->v_name);
2077 vol->v_stopping = 1;
2078 if (vol->v_state != G_RAID_VOLUME_S_STOPPED) {
2079 if (vol->v_tr) {
2080 G_RAID_TR_STOP(vol->v_tr);
2081 return (EBUSY);
2082 } else
2083 vol->v_state = G_RAID_VOLUME_S_STOPPED;
2084 }
2085 if (g_raid_event_check(sc, vol) != 0)
2086 return (EBUSY);
2087 if (vol->v_provider != NULL)
2088 return (EBUSY);
2089 if (vol->v_provider_open != 0)
2090 return (EBUSY);
2091 if (vol->v_tr) {
2092 G_RAID_TR_FREE(vol->v_tr);
2093 kobj_delete((kobj_t)vol->v_tr, M_RAID);
2094 vol->v_tr = NULL;
2095 }
2096 if (vol->v_rootmount)
2097 root_mount_rel(vol->v_rootmount);
2098 g_topology_lock();
2099 LIST_REMOVE(vol, v_global_next);
2100 g_topology_unlock();
2101 TAILQ_REMOVE(&sc->sc_volumes, vol, v_next);
2102 for (i = 0; i < G_RAID_MAX_SUBDISKS; i++) {
2103 g_raid_event_cancel(sc, &vol->v_subdisks[i]);
2104 disk = vol->v_subdisks[i].sd_disk;
2105 if (disk == NULL)
2106 continue;
2107 TAILQ_REMOVE(&disk->d_subdisks, &vol->v_subdisks[i], sd_next);
2108 }
2109 G_RAID_DEBUG1(2, sc, "Volume %s destroyed.", vol->v_name);
2110 if (sc->sc_md)
2111 G_RAID_MD_FREE_VOLUME(sc->sc_md, vol);
2112 g_raid_event_cancel(sc, vol);
2113 free(vol, M_RAID);
2114 if (sc->sc_stopping == G_RAID_DESTROY_HARD) {
2115 /* Wake up worker to let it self-destruct. */
2116 g_raid_event_send(sc, G_RAID_NODE_E_WAKE, 0);
2117 }
2118 return (0);
2119 }
2120
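/*
 * Destroy a disk descriptor: kill its consumer, disconnect and reset
 * every subdisk that referenced it, notify the metadata module and
 * free the structure.
 */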
2121 int
2122 g_raid_destroy_disk(struct g_raid_disk *disk)
2123 {
2124 struct g_raid_softc *sc;
2125 struct g_raid_subdisk *sd, *tmp;
2126
2127 sc = disk->d_softc;
2128 G_RAID_DEBUG1(2, sc, "Destroying disk.");
2129 if (disk->d_consumer) {
2130 g_raid_kill_consumer(sc, disk->d_consumer);
2131 disk->d_consumer = NULL;
2132 }
2133 TAILQ_FOREACH_SAFE(sd, &disk->d_subdisks, sd_next, tmp) {
2134 g_raid_change_subdisk_state(sd, G_RAID_SUBDISK_S_NONE);
2135 g_raid_event_send(sd, G_RAID_SUBDISK_E_DISCONNECTED,
2136 G_RAID_EVENT_SUBDISK);
2137 TAILQ_REMOVE(&disk->d_subdisks, sd, sd_next);
2138 sd->sd_disk = NULL;
2139 }
2140 TAILQ_REMOVE(&sc->sc_disks, disk, d_next);
2141 if (sc->sc_md)
2142 G_RAID_MD_FREE_DISK(sc->sc_md, disk);
2143 g_raid_event_cancel(sc, disk);
2144 free(disk, M_RAID);
2145 return (0);
2146 }
2147
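/*
 * Request destruction of an array.  'how' selects the policy when some
 * volumes are still open: SOFT fails with EBUSY, DELAYED defers the
 * destruction to the last close, HARD proceeds regardless.  Sleeps
 * (dropping sc_lock via PDROP) until the worker has destroyed the node.
 */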
2148 int
2149 g_raid_destroy(struct g_raid_softc *sc, int how)
2150 {
2151 int opens;
2152
2153 g_topology_assert_not();
2154 if (sc == NULL)
2155 return (ENXIO);
2156 sx_assert(&sc->sc_lock, SX_XLOCKED);
2157
2158 /* Count open volumes. */
2159 opens = g_raid_nopens(sc);
2160
2161 /* React if some volumes are still open. */
2162 if (opens > 0) {
2163 switch (how) {
2164 case G_RAID_DESTROY_SOFT:
2165 G_RAID_DEBUG1(1, sc,
2166 "%d volumes are still open.",
2167 opens);
2168 return (EBUSY);
2169 case G_RAID_DESTROY_DELAYED:
2170 G_RAID_DEBUG1(1, sc,
2171 "Array will be destroyed on last close.");
2172 sc->sc_stopping = G_RAID_DESTROY_DELAYED;
2173 return (EBUSY);
2174 case G_RAID_DESTROY_HARD:
2175 G_RAID_DEBUG1(1, sc,
2176 "%d volumes are still open.",
2177 opens);
2178 }
2179 }
2180
2181 /* Mark node for destruction. */
2182 sc->sc_stopping = G_RAID_DESTROY_HARD;
2183 /* Wake up worker to let it self-destruct. */
2184 g_raid_event_send(sc, G_RAID_NODE_E_WAKE, 0);
2185 /* Sleep until node destroyed. */
2186 sx_sleep(&sc->sc_stopping, &sc->sc_lock,
2187 PRIBIO | PDROP, "r:destroy", 0);
2188 return (0);
2189 }
2190
2191 static void
2192 g_raid_taste_orphan(struct g_consumer *cp)
2193 {
2194
2195 KASSERT(1 == 0, ("%s called while tasting %s.", __func__,
2196 cp->provider->name));
2197 }
2198
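/*
 * GEOM taste method: offer the provider to every enabled metadata
 * class in priority order through a temporary geom/consumer pair.  A
 * class that recognizes its metadata attaches the provider to an
 * existing array or creates a new one and hands back its geom.
 */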
2199 static struct g_geom *
2200 g_raid_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
2201 {
2202 struct g_consumer *cp;
2203 struct g_geom *gp, *geom;
2204 struct g_raid_md_class *class;
2205 struct g_raid_md_object *obj;
2206 int status;
2207
2208 g_topology_assert();
2209 g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
2210 if (!g_raid_enable)
2211 return (NULL);
2212 G_RAID_DEBUG(2, "Tasting provider %s.", pp->name);
2213
2214 gp = g_new_geomf(mp, "raid:taste");
2215 /*
2216 * This orphan function should never be called.
2217 */
2218 gp->orphan = g_raid_taste_orphan;
2219 cp = g_new_consumer(gp);
2220 g_attach(cp, pp);
2221
2222 geom = NULL;
2223 LIST_FOREACH(class, &g_raid_md_classes, mdc_list) {
2224 if (!class->mdc_enable)
2225 continue;
2226 G_RAID_DEBUG(2, "Tasting provider %s for %s metadata.",
2227 pp->name, class->name);
2228 obj = (void *)kobj_create((kobj_class_t)class, M_RAID,
2229 M_WAITOK);
2230 obj->mdo_class = class;
2231 status = G_RAID_MD_TASTE(obj, mp, cp, &geom);
2232 if (status != G_RAID_MD_TASTE_NEW)
2233 kobj_delete((kobj_t)obj, M_RAID);
2234 if (status != G_RAID_MD_TASTE_FAIL)
2235 break;
2236 }
2237
2238 g_detach(cp);
2239 g_destroy_consumer(cp);
2240 g_destroy_geom(gp);
2241 G_RAID_DEBUG(2, "Tasting provider %s done.", pp->name);
2242 return (geom);
2243 }
2244
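/*
 * Create a new array for the named on-disk metadata format, as
 * requested over gctl (e.g. by "graid label").  The format name is
 * matched case-insensitively against the registered metadata classes.
 * A hypothetical caller (illustrative sketch only; "Intel" is merely
 * an example class name) might do:
 *
 *	struct g_geom *gp;
 *
 *	if (g_raid_create_node_format("Intel", req, &gp) !=
 *	    G_RAID_MD_TASTE_NEW)
 *		gctl_error(req, "Array creation failed.");
 */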
2245 int
2246 g_raid_create_node_format(const char *format, struct gctl_req *req,
2247 struct g_geom **gp)
2248 {
2249 struct g_raid_md_class *class;
2250 struct g_raid_md_object *obj;
2251 int status;
2252
2253 G_RAID_DEBUG(2, "Creating array for %s metadata.", format);
2254 LIST_FOREACH(class, &g_raid_md_classes, mdc_list) {
2255 if (strcasecmp(class->name, format) == 0)
2256 break;
2257 }
2258 if (class == NULL) {
2259 G_RAID_DEBUG(1, "No support for %s metadata.", format);
2260 return (G_RAID_MD_TASTE_FAIL);
2261 }
2262 obj = (void *)kobj_create((kobj_class_t)class, M_RAID,
2263 M_WAITOK);
2264 obj->mdo_class = class;
2265 status = G_RAID_MD_CREATE_REQ(obj, &g_raid_class, req, gp);
2266 if (status != G_RAID_MD_TASTE_NEW)
2267 kobj_delete((kobj_t)obj, M_RAID);
2268 return (status);
2269 }
2270
2271 static int
2272 g_raid_destroy_geom(struct gctl_req *req __unused,
2273 struct g_class *mp __unused, struct g_geom *gp)
2274 {
2275 struct g_raid_softc *sc;
2276 int error;
2277
2278 g_topology_unlock();
2279 sc = gp->softc;
2280 sx_xlock(&sc->sc_lock);
2281 g_cancel_event(sc);
2282 error = g_raid_destroy(gp->softc, G_RAID_DESTROY_SOFT);
2283 if (error != 0)
2284 sx_xunlock(&sc->sc_lock);
2285 g_topology_lock();
2286 return (error);
2287 }
2288
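/*
 * Ask the metadata module to update on-disk metadata for the given
 * volume/subdisk/disk.  Skipped while the node is being hard-destroyed.
 */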
2289 void
g_raid_write_metadata(struct g_raid_softc *sc, struct g_raid_volume *vol,
2290 struct g_raid_subdisk *sd, struct g_raid_disk *disk)
2291 {
2292
2293 if (sc->sc_stopping == G_RAID_DESTROY_HARD)
2294 return;
2295 if (sc->sc_md)
2296 G_RAID_MD_WRITE(sc->sc_md, vol, sd, disk);
2297 }
2298
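/*
 * Mark a disk as failed through the metadata module.  The target may be
 * named by 'disk' directly or taken from 'sd'; requests for absent
 * disks or for disks not in the ACTIVE state are ignored with a
 * warning.
 */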
2299 void
g_raid_fail_disk(struct g_raid_softc *sc,
2300 struct g_raid_subdisk *sd, struct g_raid_disk *disk)
2301 {
2302
2303 if (disk == NULL)
2304 disk = sd->sd_disk;
2305 if (disk == NULL) {
2306 G_RAID_DEBUG1(0, sc, "Warning! Fail request to an absent disk!");
2307 return;
2308 }
2309 if (disk->d_state != G_RAID_DISK_S_ACTIVE) {
2310 G_RAID_DEBUG1(0, sc, "Warning! Fail request to a disk in the "
2311 "wrong state (%s)!", g_raid_disk_state2str(disk->d_state));
2312 return;
2313 }
2314 if (sc->sc_md)
2315 G_RAID_MD_FAIL_DISK(sc->sc_md, sd, disk);
2316 }
2317
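/*
 * GEOM dumpconf method: emits per-provider (volume) details,
 * per-consumer (disk) details, or, for the geom itself, the metadata
 * class name and the minimum (i.e. worst) state across all volumes,
 * as XML consumed through kern.geom.confxml.
 */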
2318 static void
2319 g_raid_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
2320 struct g_consumer *cp, struct g_provider *pp)
2321 {
2322 struct g_raid_softc *sc;
2323 struct g_raid_volume *vol;
2324 struct g_raid_subdisk *sd;
2325 struct g_raid_disk *disk;
2326 int i, s;
2327
2328 g_topology_assert();
2329
2330 sc = gp->softc;
2331 if (sc == NULL)
2332 return;
2333 if (pp != NULL) {
2334 vol = pp->private;
2335 g_topology_unlock();
2336 sx_xlock(&sc->sc_lock);
2337 sbuf_printf(sb, "%s<Label>%s</Label>\n", indent,
2338 vol->v_name);
2339 sbuf_printf(sb, "%s<RAIDLevel>%s</RAIDLevel>\n", indent,
2340 g_raid_volume_level2str(vol->v_raid_level,
2341 vol->v_raid_level_qualifier));
2342 sbuf_printf(sb,
2343 "%s<Transformation>%s</Transformation>\n", indent,
2344 vol->v_tr ? vol->v_tr->tro_class->name : "NONE");
2345 sbuf_printf(sb, "%s<Components>%u</Components>\n", indent,
2346 vol->v_disks_count);
2347 sbuf_printf(sb, "%s<Strip>%u</Strip>\n", indent,
2348 vol->v_strip_size);
2349 sbuf_printf(sb, "%s<State>%s</State>\n", indent,
2350 g_raid_volume_state2str(vol->v_state));
2351 sbuf_printf(sb, "%s<Dirty>%s</Dirty>\n", indent,
2352 vol->v_dirty ? "Yes" : "No");
2353 sbuf_printf(sb, "%s<Subdisks>", indent);
2354 for (i = 0; i < vol->v_disks_count; i++) {
2355 sd = &vol->v_subdisks[i];
2356 if (sd->sd_disk != NULL &&
2357 sd->sd_disk->d_consumer != NULL) {
2358 sbuf_printf(sb, "%s ",
2359 g_raid_get_diskname(sd->sd_disk));
2360 } else {
2361 sbuf_printf(sb, "NONE ");
2362 }
2363 sbuf_printf(sb, "(%s",
2364 g_raid_subdisk_state2str(sd->sd_state));
2365 if (sd->sd_state == G_RAID_SUBDISK_S_REBUILD ||
2366 sd->sd_state == G_RAID_SUBDISK_S_RESYNC) {
2367 sbuf_printf(sb, " %d%%",
2368 (int)(sd->sd_rebuild_pos * 100 /
2369 sd->sd_size));
2370 }
2371 sbuf_printf(sb, ")");
2372 if (i + 1 < vol->v_disks_count)
2373 sbuf_printf(sb, ", ");
2374 }
2375 sbuf_printf(sb, "</Subdisks>\n");
2376 sx_xunlock(&sc->sc_lock);
2377 g_topology_lock();
2378 } else if (cp != NULL) {
2379 disk = cp->private;
2380 if (disk == NULL)
2381 return;
2382 g_topology_unlock();
2383 sx_xlock(&sc->sc_lock);
2384 sbuf_printf(sb, "%s<State>%s", indent,
2385 g_raid_disk_state2str(disk->d_state));
2386 if (!TAILQ_EMPTY(&disk->d_subdisks)) {
2387 sbuf_printf(sb, " (");
2388 TAILQ_FOREACH(sd, &disk->d_subdisks, sd_next) {
2389 sbuf_printf(sb, "%s",
2390 g_raid_subdisk_state2str(sd->sd_state));
2391 if (sd->sd_state == G_RAID_SUBDISK_S_REBUILD ||
2392 sd->sd_state == G_RAID_SUBDISK_S_RESYNC) {
2393 sbuf_printf(sb, " %d%%",
2394 (int)(sd->sd_rebuild_pos * 100 /
2395 sd->sd_size));
2396 }
2397 if (TAILQ_NEXT(sd, sd_next))
2398 sbuf_printf(sb, ", ");
2399 }
2400 sbuf_printf(sb, ")");
2401 }
2402 sbuf_printf(sb, "</State>\n");
2403 sbuf_printf(sb, "%s<Subdisks>", indent);
2404 TAILQ_FOREACH(sd, &disk->d_subdisks, sd_next) {
2405 sbuf_printf(sb, "r%d(%s):%d@%ju",
2406 sd->sd_volume->v_global_id,
2407 sd->sd_volume->v_name,
2408 sd->sd_pos, (uintmax_t)sd->sd_offset);
2409 if (TAILQ_NEXT(sd, sd_next))
2410 sbuf_printf(sb, ", ");
2411 }
2412 sbuf_printf(sb, "</Subdisks>\n");
2413 sbuf_printf(sb, "%s<ReadErrors>%d</ReadErrors>\n", indent,
2414 disk->d_read_errs);
2415 sx_xunlock(&sc->sc_lock);
2416 g_topology_lock();
2417 } else {
2418 g_topology_unlock();
2419 sx_xlock(&sc->sc_lock);
2420 if (sc->sc_md) {
2421 sbuf_printf(sb, "%s<Metadata>%s</Metadata>\n", indent,
2422 sc->sc_md->mdo_class->name);
2423 }
2424 if (!TAILQ_EMPTY(&sc->sc_volumes)) {
2425 s = 0xff;
2426 TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) {
2427 if (vol->v_state < s)
2428 s = vol->v_state;
2429 }
2430 sbuf_printf(sb, "%s<State>%s</State>\n", indent,
2431 g_raid_volume_state2str(s));
2432 }
2433 sx_xunlock(&sc->sc_lock);
2434 g_topology_lock();
2435 }
2436 }
2437
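/*
 * shutdown_post_sync handler: mark every volume clean and schedule
 * delayed destruction of each array, so on-disk metadata records a
 * clean shutdown.
 */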
2438 static void
2439 g_raid_shutdown_post_sync(void *arg, int howto)
2440 {
2441 struct g_class *mp;
2442 struct g_geom *gp, *gp2;
2443 struct g_raid_softc *sc;
2444 struct g_raid_volume *vol;
2445 int error;
2446
2447 mp = arg;
2448 DROP_GIANT();
2449 g_topology_lock();
2450 g_raid_shutdown = 1;
2451 LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
2452 if ((sc = gp->softc) == NULL)
2453 continue;
2454 g_topology_unlock();
2455 sx_xlock(&sc->sc_lock);
2456 TAILQ_FOREACH(vol, &sc->sc_volumes, v_next)
2457 g_raid_clean(vol, -1);
2458 g_cancel_event(sc);
2459 error = g_raid_destroy(sc, G_RAID_DESTROY_DELAYED);
2460 if (error != 0)
2461 sx_xunlock(&sc->sc_lock);
2462 g_topology_lock();
2463 }
2464 g_topology_unlock();
2465 PICKUP_GIANT();
2466 }
2467
2468 static void
2469 g_raid_init(struct g_class *mp)
2470 {
2471
2472 g_raid_post_sync = EVENTHANDLER_REGISTER(shutdown_post_sync,
2473 g_raid_shutdown_post_sync, mp, SHUTDOWN_PRI_FIRST);
2474 if (g_raid_post_sync == NULL)
2475 G_RAID_DEBUG(0, "Warning! Cannot register shutdown event.");
2476 g_raid_started = 1;
2477 }
2478
2479 static void
2480 g_raid_fini(struct g_class *mp)
2481 {
2482
2483 if (g_raid_post_sync != NULL)
2484 EVENTHANDLER_DEREGISTER(shutdown_post_sync, g_raid_post_sync);
2485 g_raid_started = 0;
2486 }
2487
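/*
 * Module event handler shared by all metadata submodules: on load,
 * insert the class into the global list ordered by ascending priority
 * and re-taste existing providers; on unload, remove it.
 */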
2488 int
2489 g_raid_md_modevent(module_t mod, int type, void *arg)
2490 {
2491 struct g_raid_md_class *class, *c, *nc;
2492 int error;
2493
2494 error = 0;
2495 class = arg;
2496 switch (type) {
2497 case MOD_LOAD:
2498 c = LIST_FIRST(&g_raid_md_classes);
2499 if (c == NULL || c->mdc_priority > class->mdc_priority)
2500 LIST_INSERT_HEAD(&g_raid_md_classes, class, mdc_list);
2501 else {
2502 while ((nc = LIST_NEXT(c, mdc_list)) != NULL &&
2503 nc->mdc_priority < class->mdc_priority)
2504 c = nc;
2505 LIST_INSERT_AFTER(c, class, mdc_list);
2506 }
2507 if (g_raid_started)
2508 g_retaste(&g_raid_class);
2509 break;
2510 case MOD_UNLOAD:
2511 LIST_REMOVE(class, mdc_list);
2512 break;
2513 default:
2514 error = EOPNOTSUPP;
2515 break;
2516 }
2517
2518 return (error);
2519 }
2520
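/*
 * The same for transformation (RAID level) submodules; no retaste is
 * needed here since TR classes are tasted when a volume starts.
 */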
2521 int
2522 g_raid_tr_modevent(module_t mod, int type, void *arg)
2523 {
2524 struct g_raid_tr_class *class, *c, *nc;
2525 int error;
2526
2527 error = 0;
2528 class = arg;
2529 switch (type) {
2530 case MOD_LOAD:
2531 c = LIST_FIRST(&g_raid_tr_classes);
2532 if (c == NULL || c->trc_priority > class->trc_priority)
2533 LIST_INSERT_HEAD(&g_raid_tr_classes, class, trc_list);
2534 else {
2535 while ((nc = LIST_NEXT(c, trc_list)) != NULL &&
2536 nc->trc_priority < class->trc_priority)
2537 c = nc;
2538 LIST_INSERT_AFTER(c, class, trc_list);
2539 }
2540 break;
2541 case MOD_UNLOAD:
2542 LIST_REMOVE(class, trc_list);
2543 break;
2544 default:
2545 error = EOPNOTSUPP;
2546 break;
2547 }
2548
2549 return (error);
2550 }
2551
2552 /*
2553 * Use local implementation of DECLARE_GEOM_CLASS(g_raid_class, g_raid)
2554 * to reduce module priority, allowing submodules to register themselves first.
2555 */
2556 static moduledata_t g_raid_mod = {
2557 "g_raid",
2558 g_modevent,
2559 &g_raid_class
2560 };
2561 DECLARE_MODULE(g_raid, g_raid_mod, SI_SUB_DRIVERS, SI_ORDER_THIRD);
2562 MODULE_VERSION(geom_raid, 0);