sys/geom/raid/g_raid.c
1 /*-
2 * Copyright (c) 2010 Alexander Motin <mav@FreeBSD.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD: releng/9.1/sys/geom/raid/g_raid.c 240558 2012-09-16 11:02:22Z mav $");
29
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/kernel.h>
33 #include <sys/module.h>
34 #include <sys/limits.h>
35 #include <sys/lock.h>
36 #include <sys/mutex.h>
37 #include <sys/bio.h>
38 #include <sys/sbuf.h>
39 #include <sys/sysctl.h>
40 #include <sys/malloc.h>
41 #include <sys/eventhandler.h>
42 #include <vm/uma.h>
43 #include <geom/geom.h>
44 #include <sys/proc.h>
45 #include <sys/kthread.h>
46 #include <sys/sched.h>
47 #include <geom/raid/g_raid.h>
48 #include "g_raid_md_if.h"
49 #include "g_raid_tr_if.h"
50
51 static MALLOC_DEFINE(M_RAID, "raid_data", "GEOM_RAID Data");
52
53 SYSCTL_DECL(_kern_geom);
54 SYSCTL_NODE(_kern_geom, OID_AUTO, raid, CTLFLAG_RW, 0, "GEOM_RAID stuff");
55 int g_raid_enable = 1;
56 TUNABLE_INT("kern.geom.raid.enable", &g_raid_enable);
57 SYSCTL_INT(_kern_geom_raid, OID_AUTO, enable, CTLFLAG_RW,
58 &g_raid_enable, 0, "Enable on-disk metadata taste");
59 u_int g_raid_aggressive_spare = 0;
60 TUNABLE_INT("kern.geom.raid.aggressive_spare", &g_raid_aggressive_spare);
61 SYSCTL_UINT(_kern_geom_raid, OID_AUTO, aggressive_spare, CTLFLAG_RW,
62 &g_raid_aggressive_spare, 0, "Use disks without metadata as spare");
63 u_int g_raid_debug = 0;
64 TUNABLE_INT("kern.geom.raid.debug", &g_raid_debug);
65 SYSCTL_UINT(_kern_geom_raid, OID_AUTO, debug, CTLFLAG_RW, &g_raid_debug, 0,
66 "Debug level");
67 int g_raid_read_err_thresh = 10;
68 TUNABLE_INT("kern.geom.raid.read_err_thresh", &g_raid_read_err_thresh);
69 SYSCTL_UINT(_kern_geom_raid, OID_AUTO, read_err_thresh, CTLFLAG_RW,
70 &g_raid_read_err_thresh, 0,
71 "Number of read errors equated to disk failure");
72 u_int g_raid_start_timeout = 30;
73 TUNABLE_INT("kern.geom.raid.start_timeout", &g_raid_start_timeout);
74 SYSCTL_UINT(_kern_geom_raid, OID_AUTO, start_timeout, CTLFLAG_RW,
75 &g_raid_start_timeout, 0,
76 "Time to wait for all array components");
77 static u_int g_raid_clean_time = 5;
78 TUNABLE_INT("kern.geom.raid.clean_time", &g_raid_clean_time);
79 SYSCTL_UINT(_kern_geom_raid, OID_AUTO, clean_time, CTLFLAG_RW,
80 &g_raid_clean_time, 0, "Mark volume as clean when idling");
81 static u_int g_raid_disconnect_on_failure = 1;
82 TUNABLE_INT("kern.geom.raid.disconnect_on_failure",
83 &g_raid_disconnect_on_failure);
84 SYSCTL_UINT(_kern_geom_raid, OID_AUTO, disconnect_on_failure, CTLFLAG_RW,
85 &g_raid_disconnect_on_failure, 0, "Disconnect component on I/O failure.");
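/*
 * Naming policy used by g_raid_launch_provider() below: with the
 * default value 0 providers get sequential names (raid/r<id>); a
 * non-zero value requests naming by volume label (raid/<name>),
 * falling back to the sequential form on empty or duplicate names.
 */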
86 static u_int g_raid_name_format = 0;
87 TUNABLE_INT("kern.geom.raid.name_format", &g_raid_name_format);
88 SYSCTL_UINT(_kern_geom_raid, OID_AUTO, name_format, CTLFLAG_RW,
89 	    &g_raid_name_format, 0, "Provider name format.");
90 static u_int g_raid_idle_threshold = 1000000;
91 TUNABLE_INT("kern.geom.raid.idle_threshold", &g_raid_idle_threshold);
92 SYSCTL_UINT(_kern_geom_raid, OID_AUTO, idle_threshold, CTLFLAG_RW,
93 &g_raid_idle_threshold, 1000000,
94 "Time in microseconds to consider a volume idle.");
95
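/*
 * Debug wrapper around msleep(9): logs the sleep and the wakeup at
 * debug level 4 so thread stalls can be traced.  For example, the
 * event code below sleeps with
 *	MSLEEP(error, ep, &sc->sc_queue_mtx, PRIBIO | PDROP, "m:event", hz * 5);
 */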
96 #define MSLEEP(rv, ident, mtx, priority, wmesg, timeout) do { \
97 G_RAID_DEBUG(4, "%s: Sleeping %p.", __func__, (ident)); \
98 rv = msleep((ident), (mtx), (priority), (wmesg), (timeout)); \
99 G_RAID_DEBUG(4, "%s: Woken up %p.", __func__, (ident)); \
100 } while (0)
101
102 LIST_HEAD(, g_raid_md_class) g_raid_md_classes =
103 LIST_HEAD_INITIALIZER(g_raid_md_classes);
104
105 LIST_HEAD(, g_raid_tr_class) g_raid_tr_classes =
106 LIST_HEAD_INITIALIZER(g_raid_tr_classes);
107
108 LIST_HEAD(, g_raid_volume) g_raid_volumes =
109 LIST_HEAD_INITIALIZER(g_raid_volumes);
110
111 static eventhandler_tag g_raid_pre_sync = NULL;
112 static int g_raid_started = 0;
113
114 static int g_raid_destroy_geom(struct gctl_req *req, struct g_class *mp,
115 struct g_geom *gp);
116 static g_taste_t g_raid_taste;
117 static void g_raid_init(struct g_class *mp);
118 static void g_raid_fini(struct g_class *mp);
119
120 struct g_class g_raid_class = {
121 .name = G_RAID_CLASS_NAME,
122 .version = G_VERSION,
123 .ctlreq = g_raid_ctl,
124 .taste = g_raid_taste,
125 .destroy_geom = g_raid_destroy_geom,
126 .init = g_raid_init,
127 .fini = g_raid_fini
128 };
129
130 static void g_raid_destroy_provider(struct g_raid_volume *vol);
131 static int g_raid_update_disk(struct g_raid_disk *disk, u_int event);
132 static int g_raid_update_subdisk(struct g_raid_subdisk *subdisk, u_int event);
133 static int g_raid_update_volume(struct g_raid_volume *vol, u_int event);
134 static int g_raid_update_node(struct g_raid_softc *sc, u_int event);
135 static void g_raid_dumpconf(struct sbuf *sb, const char *indent,
136 struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp);
137 static void g_raid_start(struct bio *bp);
138 static void g_raid_start_request(struct bio *bp);
139 static void g_raid_disk_done(struct bio *bp);
140 static void g_raid_poll(struct g_raid_softc *sc);
141
142 static const char *
143 g_raid_node_event2str(int event)
144 {
145
146 switch (event) {
147 case G_RAID_NODE_E_WAKE:
148 return ("WAKE");
149 case G_RAID_NODE_E_START:
150 return ("START");
151 default:
152 return ("INVALID");
153 }
154 }
155
156 const char *
157 g_raid_disk_state2str(int state)
158 {
159
160 switch (state) {
161 case G_RAID_DISK_S_NONE:
162 return ("NONE");
163 case G_RAID_DISK_S_OFFLINE:
164 return ("OFFLINE");
165 case G_RAID_DISK_S_FAILED:
166 return ("FAILED");
167 case G_RAID_DISK_S_STALE_FAILED:
168 return ("STALE_FAILED");
169 case G_RAID_DISK_S_SPARE:
170 return ("SPARE");
171 case G_RAID_DISK_S_STALE:
172 return ("STALE");
173 case G_RAID_DISK_S_ACTIVE:
174 return ("ACTIVE");
175 default:
176 return ("INVALID");
177 }
178 }
179
180 static const char *
181 g_raid_disk_event2str(int event)
182 {
183
184 switch (event) {
185 case G_RAID_DISK_E_DISCONNECTED:
186 return ("DISCONNECTED");
187 default:
188 return ("INVALID");
189 }
190 }
191
192 const char *
193 g_raid_subdisk_state2str(int state)
194 {
195
196 switch (state) {
197 case G_RAID_SUBDISK_S_NONE:
198 return ("NONE");
199 case G_RAID_SUBDISK_S_FAILED:
200 return ("FAILED");
201 case G_RAID_SUBDISK_S_NEW:
202 return ("NEW");
203 case G_RAID_SUBDISK_S_REBUILD:
204 return ("REBUILD");
205 case G_RAID_SUBDISK_S_UNINITIALIZED:
206 return ("UNINITIALIZED");
207 case G_RAID_SUBDISK_S_STALE:
208 return ("STALE");
209 case G_RAID_SUBDISK_S_RESYNC:
210 return ("RESYNC");
211 case G_RAID_SUBDISK_S_ACTIVE:
212 return ("ACTIVE");
213 default:
214 return ("INVALID");
215 }
216 }
217
218 static const char *
219 g_raid_subdisk_event2str(int event)
220 {
221
222 switch (event) {
223 case G_RAID_SUBDISK_E_NEW:
224 return ("NEW");
225 case G_RAID_SUBDISK_E_DISCONNECTED:
226 return ("DISCONNECTED");
227 default:
228 return ("INVALID");
229 }
230 }
231
232 const char *
233 g_raid_volume_state2str(int state)
234 {
235
236 switch (state) {
237 case G_RAID_VOLUME_S_STARTING:
238 return ("STARTING");
239 case G_RAID_VOLUME_S_BROKEN:
240 return ("BROKEN");
241 case G_RAID_VOLUME_S_DEGRADED:
242 return ("DEGRADED");
243 case G_RAID_VOLUME_S_SUBOPTIMAL:
244 return ("SUBOPTIMAL");
245 case G_RAID_VOLUME_S_OPTIMAL:
246 return ("OPTIMAL");
247 case G_RAID_VOLUME_S_UNSUPPORTED:
248 return ("UNSUPPORTED");
249 case G_RAID_VOLUME_S_STOPPED:
250 return ("STOPPED");
251 default:
252 return ("INVALID");
253 }
254 }
255
256 static const char *
257 g_raid_volume_event2str(int event)
258 {
259
260 switch (event) {
261 case G_RAID_VOLUME_E_UP:
262 return ("UP");
263 case G_RAID_VOLUME_E_DOWN:
264 return ("DOWN");
265 case G_RAID_VOLUME_E_START:
266 return ("START");
267 case G_RAID_VOLUME_E_STARTMD:
268 return ("STARTMD");
269 default:
270 return ("INVALID");
271 }
272 }
273
274 const char *
275 g_raid_volume_level2str(int level, int qual)
276 {
277
278 switch (level) {
279 case G_RAID_VOLUME_RL_RAID0:
280 return ("RAID0");
281 case G_RAID_VOLUME_RL_RAID1:
282 return ("RAID1");
283 case G_RAID_VOLUME_RL_RAID3:
284 if (qual == G_RAID_VOLUME_RLQ_R3P0)
285 return ("RAID3-P0");
286 if (qual == G_RAID_VOLUME_RLQ_R3PN)
287 return ("RAID3-PN");
288 return ("RAID3");
289 case G_RAID_VOLUME_RL_RAID4:
290 if (qual == G_RAID_VOLUME_RLQ_R4P0)
291 return ("RAID4-P0");
292 if (qual == G_RAID_VOLUME_RLQ_R4PN)
293 return ("RAID4-PN");
294 return ("RAID4");
295 case G_RAID_VOLUME_RL_RAID5:
296 if (qual == G_RAID_VOLUME_RLQ_R5RA)
297 return ("RAID5-RA");
298 if (qual == G_RAID_VOLUME_RLQ_R5RS)
299 return ("RAID5-RS");
300 if (qual == G_RAID_VOLUME_RLQ_R5LA)
301 return ("RAID5-LA");
302 if (qual == G_RAID_VOLUME_RLQ_R5LS)
303 return ("RAID5-LS");
304 return ("RAID5");
305 case G_RAID_VOLUME_RL_RAID6:
306 if (qual == G_RAID_VOLUME_RLQ_R6RA)
307 return ("RAID6-RA");
308 if (qual == G_RAID_VOLUME_RLQ_R6RS)
309 return ("RAID6-RS");
310 if (qual == G_RAID_VOLUME_RLQ_R6LA)
311 return ("RAID6-LA");
312 if (qual == G_RAID_VOLUME_RLQ_R6LS)
313 return ("RAID6-LS");
314 return ("RAID6");
315 case G_RAID_VOLUME_RL_RAIDMDF:
316 if (qual == G_RAID_VOLUME_RLQ_RMDFRA)
317 return ("RAIDMDF-RA");
318 if (qual == G_RAID_VOLUME_RLQ_RMDFRS)
319 return ("RAIDMDF-RS");
320 if (qual == G_RAID_VOLUME_RLQ_RMDFLA)
321 return ("RAIDMDF-LA");
322 if (qual == G_RAID_VOLUME_RLQ_RMDFLS)
323 return ("RAIDMDF-LS");
324 return ("RAIDMDF");
325 case G_RAID_VOLUME_RL_RAID1E:
326 if (qual == G_RAID_VOLUME_RLQ_R1EA)
327 return ("RAID1E-A");
328 if (qual == G_RAID_VOLUME_RLQ_R1EO)
329 return ("RAID1E-O");
330 return ("RAID1E");
331 case G_RAID_VOLUME_RL_SINGLE:
332 return ("SINGLE");
333 case G_RAID_VOLUME_RL_CONCAT:
334 return ("CONCAT");
335 case G_RAID_VOLUME_RL_RAID5E:
336 if (qual == G_RAID_VOLUME_RLQ_R5ERA)
337 return ("RAID5E-RA");
338 if (qual == G_RAID_VOLUME_RLQ_R5ERS)
339 return ("RAID5E-RS");
340 if (qual == G_RAID_VOLUME_RLQ_R5ELA)
341 return ("RAID5E-LA");
342 if (qual == G_RAID_VOLUME_RLQ_R5ELS)
343 return ("RAID5E-LS");
344 return ("RAID5E");
345 case G_RAID_VOLUME_RL_RAID5EE:
346 if (qual == G_RAID_VOLUME_RLQ_R5EERA)
347 return ("RAID5EE-RA");
348 if (qual == G_RAID_VOLUME_RLQ_R5EERS)
349 return ("RAID5EE-RS");
350 if (qual == G_RAID_VOLUME_RLQ_R5EELA)
351 return ("RAID5EE-LA");
352 if (qual == G_RAID_VOLUME_RLQ_R5EELS)
353 return ("RAID5EE-LS");
354 return ("RAID5EE");
355 case G_RAID_VOLUME_RL_RAID5R:
356 if (qual == G_RAID_VOLUME_RLQ_R5RRA)
357 return ("RAID5R-RA");
358 if (qual == G_RAID_VOLUME_RLQ_R5RRS)
359 return ("RAID5R-RS");
360 if (qual == G_RAID_VOLUME_RLQ_R5RLA)
361 return ("RAID5R-LA");
362 if (qual == G_RAID_VOLUME_RLQ_R5RLS)
363 return ("RAID5R-LS");
364 		return ("RAID5R");
365 default:
366 return ("UNKNOWN");
367 }
368 }
369
370 int
371 g_raid_volume_str2level(const char *str, int *level, int *qual)
372 {
373
374 *level = G_RAID_VOLUME_RL_UNKNOWN;
375 *qual = G_RAID_VOLUME_RLQ_NONE;
376 if (strcasecmp(str, "RAID0") == 0)
377 *level = G_RAID_VOLUME_RL_RAID0;
378 else if (strcasecmp(str, "RAID1") == 0)
379 *level = G_RAID_VOLUME_RL_RAID1;
380 else if (strcasecmp(str, "RAID3-P0") == 0) {
381 *level = G_RAID_VOLUME_RL_RAID3;
382 *qual = G_RAID_VOLUME_RLQ_R3P0;
383 } else if (strcasecmp(str, "RAID3-PN") == 0 ||
384 strcasecmp(str, "RAID3") == 0) {
385 *level = G_RAID_VOLUME_RL_RAID3;
386 *qual = G_RAID_VOLUME_RLQ_R3PN;
387 } else if (strcasecmp(str, "RAID4-P0") == 0) {
388 *level = G_RAID_VOLUME_RL_RAID4;
389 *qual = G_RAID_VOLUME_RLQ_R4P0;
390 } else if (strcasecmp(str, "RAID4-PN") == 0 ||
391 strcasecmp(str, "RAID4") == 0) {
392 *level = G_RAID_VOLUME_RL_RAID4;
393 *qual = G_RAID_VOLUME_RLQ_R4PN;
394 } else if (strcasecmp(str, "RAID5-RA") == 0) {
395 *level = G_RAID_VOLUME_RL_RAID5;
396 *qual = G_RAID_VOLUME_RLQ_R5RA;
397 } else if (strcasecmp(str, "RAID5-RS") == 0) {
398 *level = G_RAID_VOLUME_RL_RAID5;
399 *qual = G_RAID_VOLUME_RLQ_R5RS;
400 } else if (strcasecmp(str, "RAID5") == 0 ||
401 strcasecmp(str, "RAID5-LA") == 0) {
402 *level = G_RAID_VOLUME_RL_RAID5;
403 *qual = G_RAID_VOLUME_RLQ_R5LA;
404 } else if (strcasecmp(str, "RAID5-LS") == 0) {
405 *level = G_RAID_VOLUME_RL_RAID5;
406 *qual = G_RAID_VOLUME_RLQ_R5LS;
407 } else if (strcasecmp(str, "RAID6-RA") == 0) {
408 *level = G_RAID_VOLUME_RL_RAID6;
409 *qual = G_RAID_VOLUME_RLQ_R6RA;
410 } else if (strcasecmp(str, "RAID6-RS") == 0) {
411 *level = G_RAID_VOLUME_RL_RAID6;
412 *qual = G_RAID_VOLUME_RLQ_R6RS;
413 } else if (strcasecmp(str, "RAID6") == 0 ||
414 strcasecmp(str, "RAID6-LA") == 0) {
415 *level = G_RAID_VOLUME_RL_RAID6;
416 *qual = G_RAID_VOLUME_RLQ_R6LA;
417 } else if (strcasecmp(str, "RAID6-LS") == 0) {
418 *level = G_RAID_VOLUME_RL_RAID6;
419 *qual = G_RAID_VOLUME_RLQ_R6LS;
420 } else if (strcasecmp(str, "RAIDMDF-RA") == 0) {
421 *level = G_RAID_VOLUME_RL_RAIDMDF;
422 *qual = G_RAID_VOLUME_RLQ_RMDFRA;
423 } else if (strcasecmp(str, "RAIDMDF-RS") == 0) {
424 *level = G_RAID_VOLUME_RL_RAIDMDF;
425 *qual = G_RAID_VOLUME_RLQ_RMDFRS;
426 } else if (strcasecmp(str, "RAIDMDF") == 0 ||
427 strcasecmp(str, "RAIDMDF-LA") == 0) {
428 *level = G_RAID_VOLUME_RL_RAIDMDF;
429 *qual = G_RAID_VOLUME_RLQ_RMDFLA;
430 } else if (strcasecmp(str, "RAIDMDF-LS") == 0) {
431 *level = G_RAID_VOLUME_RL_RAIDMDF;
432 *qual = G_RAID_VOLUME_RLQ_RMDFLS;
433 } else if (strcasecmp(str, "RAID10") == 0 ||
434 strcasecmp(str, "RAID1E") == 0 ||
435 strcasecmp(str, "RAID1E-A") == 0) {
436 *level = G_RAID_VOLUME_RL_RAID1E;
437 *qual = G_RAID_VOLUME_RLQ_R1EA;
438 } else if (strcasecmp(str, "RAID1E-O") == 0) {
439 *level = G_RAID_VOLUME_RL_RAID1E;
440 *qual = G_RAID_VOLUME_RLQ_R1EO;
441 } else if (strcasecmp(str, "SINGLE") == 0)
442 *level = G_RAID_VOLUME_RL_SINGLE;
443 else if (strcasecmp(str, "CONCAT") == 0)
444 *level = G_RAID_VOLUME_RL_CONCAT;
445 else if (strcasecmp(str, "RAID5E-RA") == 0) {
446 *level = G_RAID_VOLUME_RL_RAID5E;
447 *qual = G_RAID_VOLUME_RLQ_R5ERA;
448 } else if (strcasecmp(str, "RAID5E-RS") == 0) {
449 *level = G_RAID_VOLUME_RL_RAID5E;
450 *qual = G_RAID_VOLUME_RLQ_R5ERS;
451 } else if (strcasecmp(str, "RAID5E") == 0 ||
452 strcasecmp(str, "RAID5E-LA") == 0) {
453 *level = G_RAID_VOLUME_RL_RAID5E;
454 *qual = G_RAID_VOLUME_RLQ_R5ELA;
455 } else if (strcasecmp(str, "RAID5E-LS") == 0) {
456 *level = G_RAID_VOLUME_RL_RAID5E;
457 *qual = G_RAID_VOLUME_RLQ_R5ELS;
458 } else if (strcasecmp(str, "RAID5EE-RA") == 0) {
459 *level = G_RAID_VOLUME_RL_RAID5EE;
460 *qual = G_RAID_VOLUME_RLQ_R5EERA;
461 } else if (strcasecmp(str, "RAID5EE-RS") == 0) {
462 *level = G_RAID_VOLUME_RL_RAID5EE;
463 *qual = G_RAID_VOLUME_RLQ_R5EERS;
464 } else if (strcasecmp(str, "RAID5EE") == 0 ||
465 strcasecmp(str, "RAID5EE-LA") == 0) {
466 *level = G_RAID_VOLUME_RL_RAID5EE;
467 *qual = G_RAID_VOLUME_RLQ_R5EELA;
468 } else if (strcasecmp(str, "RAID5EE-LS") == 0) {
469 *level = G_RAID_VOLUME_RL_RAID5EE;
470 *qual = G_RAID_VOLUME_RLQ_R5EELS;
471 } else if (strcasecmp(str, "RAID5R-RA") == 0) {
472 *level = G_RAID_VOLUME_RL_RAID5R;
473 *qual = G_RAID_VOLUME_RLQ_R5RRA;
474 } else if (strcasecmp(str, "RAID5R-RS") == 0) {
475 *level = G_RAID_VOLUME_RL_RAID5R;
476 *qual = G_RAID_VOLUME_RLQ_R5RRS;
477 } else if (strcasecmp(str, "RAID5R") == 0 ||
478 strcasecmp(str, "RAID5R-LA") == 0) {
479 *level = G_RAID_VOLUME_RL_RAID5R;
480 *qual = G_RAID_VOLUME_RLQ_R5RLA;
481 } else if (strcasecmp(str, "RAID5R-LS") == 0) {
482 *level = G_RAID_VOLUME_RL_RAID5R;
483 *qual = G_RAID_VOLUME_RLQ_R5RLS;
484 } else
485 return (-1);
486 return (0);
487 }
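/*
 * Example: g_raid_volume_str2level("RAID5", &level, &qual) sets
 * level = G_RAID_VOLUME_RL_RAID5 and qual = G_RAID_VOLUME_RLQ_R5LA,
 * i.e. plain "RAID5" is treated as the left-asymmetric layout; an
 * unrecognized string leaves level as RL_UNKNOWN and returns -1.
 */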
488
489 const char *
490 g_raid_get_diskname(struct g_raid_disk *disk)
491 {
492
493 if (disk->d_consumer == NULL || disk->d_consumer->provider == NULL)
494 return ("[unknown]");
495 return (disk->d_consumer->provider->name);
496 }
497
498 void
499 g_raid_report_disk_state(struct g_raid_disk *disk)
500 {
501 struct g_raid_subdisk *sd;
502 int len, state;
503 uint32_t s;
504
505 if (disk->d_consumer == NULL)
506 return;
507 if (disk->d_state == G_RAID_DISK_S_FAILED ||
508 disk->d_state == G_RAID_DISK_S_STALE_FAILED) {
509 s = G_STATE_FAILED;
510 } else {
511 state = G_RAID_SUBDISK_S_ACTIVE;
512 TAILQ_FOREACH(sd, &disk->d_subdisks, sd_next) {
513 if (sd->sd_state < state)
514 state = sd->sd_state;
515 }
516 if (state == G_RAID_SUBDISK_S_FAILED)
517 s = G_STATE_FAILED;
518 else if (state == G_RAID_SUBDISK_S_NEW ||
519 state == G_RAID_SUBDISK_S_REBUILD)
520 s = G_STATE_REBUILD;
521 else if (state == G_RAID_SUBDISK_S_STALE ||
522 state == G_RAID_SUBDISK_S_RESYNC)
523 s = G_STATE_RESYNC;
524 else
525 s = G_STATE_ACTIVE;
526 }
527 len = sizeof(s);
528 g_io_getattr("GEOM::setstate", disk->d_consumer, &len, &s);
529 G_RAID_DEBUG1(2, disk->d_softc, "Disk %s state reported as %d.",
530 g_raid_get_diskname(disk), s);
531 }
532
533 void
534 g_raid_change_disk_state(struct g_raid_disk *disk, int state)
535 {
536
537 G_RAID_DEBUG1(0, disk->d_softc, "Disk %s state changed from %s to %s.",
538 g_raid_get_diskname(disk),
539 g_raid_disk_state2str(disk->d_state),
540 g_raid_disk_state2str(state));
541 disk->d_state = state;
542 g_raid_report_disk_state(disk);
543 }
544
545 void
546 g_raid_change_subdisk_state(struct g_raid_subdisk *sd, int state)
547 {
548
549 G_RAID_DEBUG1(0, sd->sd_softc,
550 "Subdisk %s:%d-%s state changed from %s to %s.",
551 sd->sd_volume->v_name, sd->sd_pos,
552 sd->sd_disk ? g_raid_get_diskname(sd->sd_disk) : "[none]",
553 g_raid_subdisk_state2str(sd->sd_state),
554 g_raid_subdisk_state2str(state));
555 sd->sd_state = state;
556 if (sd->sd_disk)
557 g_raid_report_disk_state(sd->sd_disk);
558 }
559
560 void
561 g_raid_change_volume_state(struct g_raid_volume *vol, int state)
562 {
563
564 G_RAID_DEBUG1(0, vol->v_softc,
565 "Volume %s state changed from %s to %s.",
566 vol->v_name,
567 g_raid_volume_state2str(vol->v_state),
568 g_raid_volume_state2str(state));
569 vol->v_state = state;
570 }
571
572 /*
573  * --- Event handling functions ---
574  * Events in geom_raid are used to maintain the status of subdisks and
575  * volumes from a single thread, which simplifies locking.
576 */
577 static void
578 g_raid_event_free(struct g_raid_event *ep)
579 {
580
581 free(ep, M_RAID);
582 }
583
584 int
585 g_raid_event_send(void *arg, int event, int flags)
586 {
587 struct g_raid_softc *sc;
588 struct g_raid_event *ep;
589 int error;
590
591 if ((flags & G_RAID_EVENT_VOLUME) != 0) {
592 sc = ((struct g_raid_volume *)arg)->v_softc;
593 } else if ((flags & G_RAID_EVENT_DISK) != 0) {
594 sc = ((struct g_raid_disk *)arg)->d_softc;
595 } else if ((flags & G_RAID_EVENT_SUBDISK) != 0) {
596 sc = ((struct g_raid_subdisk *)arg)->sd_softc;
597 } else {
598 sc = arg;
599 }
600 ep = malloc(sizeof(*ep), M_RAID,
601 sx_xlocked(&sc->sc_lock) ? M_WAITOK : M_NOWAIT);
602 if (ep == NULL)
603 return (ENOMEM);
604 ep->e_tgt = arg;
605 ep->e_event = event;
606 ep->e_flags = flags;
607 ep->e_error = 0;
608 G_RAID_DEBUG1(4, sc, "Sending event %p. Waking up %p.", ep, sc);
609 mtx_lock(&sc->sc_queue_mtx);
610 TAILQ_INSERT_TAIL(&sc->sc_events, ep, e_next);
611 mtx_unlock(&sc->sc_queue_mtx);
612 wakeup(sc);
613
614 if ((flags & G_RAID_EVENT_WAIT) == 0)
615 return (0);
616
617 sx_assert(&sc->sc_lock, SX_XLOCKED);
618 G_RAID_DEBUG1(4, sc, "Sleeping on %p.", ep);
619 sx_xunlock(&sc->sc_lock);
620 while ((ep->e_flags & G_RAID_EVENT_DONE) == 0) {
621 mtx_lock(&sc->sc_queue_mtx);
622 MSLEEP(error, ep, &sc->sc_queue_mtx, PRIBIO | PDROP, "m:event",
623 hz * 5);
624 }
625 error = ep->e_error;
626 g_raid_event_free(ep);
627 sx_xlock(&sc->sc_lock);
628 return (error);
629 }
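/*
 * Example: the orphan handler below reports a lost disk with
 *	g_raid_event_send(disk, G_RAID_DISK_E_DISCONNECTED,
 *	    G_RAID_EVENT_DISK);
 * When G_RAID_EVENT_WAIT is passed the caller must hold sc_lock
 * exclusively; the lock is dropped while waiting for the worker
 * thread to process the event and report back via e_error.
 */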
630
631 static void
632 g_raid_event_cancel(struct g_raid_softc *sc, void *tgt)
633 {
634 struct g_raid_event *ep, *tmpep;
635
636 sx_assert(&sc->sc_lock, SX_XLOCKED);
637
638 mtx_lock(&sc->sc_queue_mtx);
639 TAILQ_FOREACH_SAFE(ep, &sc->sc_events, e_next, tmpep) {
640 if (ep->e_tgt != tgt)
641 continue;
642 TAILQ_REMOVE(&sc->sc_events, ep, e_next);
643 if ((ep->e_flags & G_RAID_EVENT_WAIT) == 0)
644 g_raid_event_free(ep);
645 else {
646 ep->e_error = ECANCELED;
647 wakeup(ep);
648 }
649 }
650 mtx_unlock(&sc->sc_queue_mtx);
651 }
652
653 static int
654 g_raid_event_check(struct g_raid_softc *sc, void *tgt)
655 {
656 struct g_raid_event *ep;
657 int res = 0;
658
659 sx_assert(&sc->sc_lock, SX_XLOCKED);
660
661 mtx_lock(&sc->sc_queue_mtx);
662 TAILQ_FOREACH(ep, &sc->sc_events, e_next) {
663 if (ep->e_tgt != tgt)
664 continue;
665 res = 1;
666 break;
667 }
668 mtx_unlock(&sc->sc_queue_mtx);
669 return (res);
670 }
671
672 /*
673  * Return the number of disks in the given state.
674 * If state is equal to -1, count all connected disks.
675 */
676 u_int
677 g_raid_ndisks(struct g_raid_softc *sc, int state)
678 {
679 struct g_raid_disk *disk;
680 u_int n;
681
682 sx_assert(&sc->sc_lock, SX_LOCKED);
683
684 n = 0;
685 TAILQ_FOREACH(disk, &sc->sc_disks, d_next) {
686 if (disk->d_state == state || state == -1)
687 n++;
688 }
689 return (n);
690 }
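/*
 * E.g. g_raid_ndisks(sc, G_RAID_DISK_S_ACTIVE) counts active disks,
 * while g_raid_ndisks(sc, -1) counts every connected disk.
 */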
691
692 /*
693  * Return the number of subdisks in the given state.
694  * If state is equal to -1, count all connected subdisks.
695 */
696 u_int
697 g_raid_nsubdisks(struct g_raid_volume *vol, int state)
698 {
699 struct g_raid_subdisk *subdisk;
700 struct g_raid_softc *sc;
701 	u_int i, n;
702
703 sc = vol->v_softc;
704 sx_assert(&sc->sc_lock, SX_LOCKED);
705
706 n = 0;
707 for (i = 0; i < vol->v_disks_count; i++) {
708 subdisk = &vol->v_subdisks[i];
709 if ((state == -1 &&
710 subdisk->sd_state != G_RAID_SUBDISK_S_NONE) ||
711 subdisk->sd_state == state)
712 n++;
713 }
714 return (n);
715 }
716
717 /*
718  * Return the first subdisk in the given state.
719  * If state is equal to -1, return the first connected subdisk.
720 */
721 struct g_raid_subdisk *
722 g_raid_get_subdisk(struct g_raid_volume *vol, int state)
723 {
724 struct g_raid_subdisk *sd;
725 struct g_raid_softc *sc;
726 u_int i;
727
728 sc = vol->v_softc;
729 sx_assert(&sc->sc_lock, SX_LOCKED);
730
731 for (i = 0; i < vol->v_disks_count; i++) {
732 sd = &vol->v_subdisks[i];
733 if ((state == -1 &&
734 sd->sd_state != G_RAID_SUBDISK_S_NONE) ||
735 sd->sd_state == state)
736 return (sd);
737 }
738 return (NULL);
739 }
740
741 struct g_consumer *
742 g_raid_open_consumer(struct g_raid_softc *sc, const char *name)
743 {
744 struct g_consumer *cp;
745 struct g_provider *pp;
746
747 g_topology_assert();
748
749 if (strncmp(name, "/dev/", 5) == 0)
750 name += 5;
751 pp = g_provider_by_name(name);
752 if (pp == NULL)
753 return (NULL);
754 cp = g_new_consumer(sc->sc_geom);
755 if (g_attach(cp, pp) != 0) {
756 g_destroy_consumer(cp);
757 return (NULL);
758 }
759 if (g_access(cp, 1, 1, 1) != 0) {
760 g_detach(cp);
761 g_destroy_consumer(cp);
762 return (NULL);
763 }
764 return (cp);
765 }
766
767 static u_int
768 g_raid_nrequests(struct g_raid_softc *sc, struct g_consumer *cp)
769 {
770 struct bio *bp;
771 u_int nreqs = 0;
772
773 mtx_lock(&sc->sc_queue_mtx);
774 TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
775 if (bp->bio_from == cp)
776 nreqs++;
777 }
778 mtx_unlock(&sc->sc_queue_mtx);
779 return (nreqs);
780 }
781
782 u_int
783 g_raid_nopens(struct g_raid_softc *sc)
784 {
785 struct g_raid_volume *vol;
786 u_int opens;
787
788 opens = 0;
789 TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) {
790 if (vol->v_provider_open != 0)
791 opens++;
792 }
793 return (opens);
794 }
795
796 static int
797 g_raid_consumer_is_busy(struct g_raid_softc *sc, struct g_consumer *cp)
798 {
799
800 if (cp->index > 0) {
801 G_RAID_DEBUG1(2, sc,
802 "I/O requests for %s exist, can't destroy it now.",
803 cp->provider->name);
804 return (1);
805 }
806 if (g_raid_nrequests(sc, cp) > 0) {
807 G_RAID_DEBUG1(2, sc,
808 "I/O requests for %s in queue, can't destroy it now.",
809 cp->provider->name);
810 return (1);
811 }
812 return (0);
813 }
814
815 static void
816 g_raid_destroy_consumer(void *arg, int flags __unused)
817 {
818 struct g_consumer *cp;
819
820 g_topology_assert();
821
822 cp = arg;
823 G_RAID_DEBUG(1, "Consumer %s destroyed.", cp->provider->name);
824 g_detach(cp);
825 g_destroy_consumer(cp);
826 }
827
828 void
829 g_raid_kill_consumer(struct g_raid_softc *sc, struct g_consumer *cp)
830 {
831 struct g_provider *pp;
832 int retaste_wait;
833
834 g_topology_assert_not();
835
836 g_topology_lock();
837 cp->private = NULL;
838 if (g_raid_consumer_is_busy(sc, cp))
839 goto out;
840 pp = cp->provider;
841 retaste_wait = 0;
842 if (cp->acw == 1) {
843 if ((pp->geom->flags & G_GEOM_WITHER) == 0)
844 retaste_wait = 1;
845 }
846 if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
847 g_access(cp, -cp->acr, -cp->acw, -cp->ace);
848 if (retaste_wait) {
849 /*
850 		 * After the retaste event has been sent (inside g_access()),
851 		 * we can post an event to detach and destroy the consumer.
852 		 * A class that already has a consumer attached to the given
853 		 * provider will not receive a retaste event for that provider.
854 		 * This is how retaste events are ignored when closing
855 		 * consumers opened for write: the consumer is detached and
856 		 * destroyed only after the retaste event has been sent.
857 */
858 g_post_event(g_raid_destroy_consumer, cp, M_WAITOK, NULL);
859 goto out;
860 }
861 G_RAID_DEBUG(1, "Consumer %s destroyed.", pp->name);
862 g_detach(cp);
863 g_destroy_consumer(cp);
864 out:
865 g_topology_unlock();
866 }
867
868 static void
869 g_raid_orphan(struct g_consumer *cp)
870 {
871 struct g_raid_disk *disk;
872
873 g_topology_assert();
874
875 disk = cp->private;
876 if (disk == NULL)
877 return;
878 g_raid_event_send(disk, G_RAID_DISK_E_DISCONNECTED,
879 G_RAID_EVENT_DISK);
880 }
881
882 static int
883 g_raid_clean(struct g_raid_volume *vol, int acw)
884 {
885 struct g_raid_softc *sc;
886 int timeout;
887
888 sc = vol->v_softc;
889 g_topology_assert_not();
890 sx_assert(&sc->sc_lock, SX_XLOCKED);
891
892 // if ((sc->sc_flags & G_RAID_DEVICE_FLAG_NOFAILSYNC) != 0)
893 // return (0);
894 if (!vol->v_dirty)
895 return (0);
896 if (vol->v_writes > 0)
897 return (0);
898 if (acw > 0 || (acw == -1 &&
899 vol->v_provider != NULL && vol->v_provider->acw > 0)) {
900 timeout = g_raid_clean_time - (time_uptime - vol->v_last_write);
901 if (timeout > 0)
902 return (timeout);
903 }
904 vol->v_dirty = 0;
905 G_RAID_DEBUG1(1, sc, "Volume %s marked as clean.",
906 vol->v_name);
907 g_raid_write_metadata(sc, vol, NULL, NULL);
908 return (0);
909 }
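/*
 * Note: a positive return value from g_raid_clean() is the number of
 * seconds remaining before the volume may be marked clean while it is
 * still open for writing; 0 means nothing was pending or the metadata
 * has just been rewritten with the volume marked clean.
 */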
910
911 static void
912 g_raid_dirty(struct g_raid_volume *vol)
913 {
914 struct g_raid_softc *sc;
915
916 sc = vol->v_softc;
917 g_topology_assert_not();
918 sx_assert(&sc->sc_lock, SX_XLOCKED);
919
920 // if ((sc->sc_flags & G_RAID_DEVICE_FLAG_NOFAILSYNC) != 0)
921 // return;
922 vol->v_dirty = 1;
923 G_RAID_DEBUG1(1, sc, "Volume %s marked as dirty.",
924 vol->v_name);
925 g_raid_write_metadata(sc, vol, NULL, NULL);
926 }
927
928 void
929 g_raid_tr_flush_common(struct g_raid_tr_object *tr, struct bio *bp)
930 {
931 struct g_raid_softc *sc;
932 struct g_raid_volume *vol;
933 struct g_raid_subdisk *sd;
934 struct bio_queue_head queue;
935 struct bio *cbp;
936 int i;
937
938 vol = tr->tro_volume;
939 sc = vol->v_softc;
940
941 /*
942 * Allocate all bios before sending any request, so we can return
943 	 * ENOMEM in a nice and clean way.
944 */
945 bioq_init(&queue);
946 for (i = 0; i < vol->v_disks_count; i++) {
947 sd = &vol->v_subdisks[i];
948 if (sd->sd_state == G_RAID_SUBDISK_S_NONE ||
949 sd->sd_state == G_RAID_SUBDISK_S_FAILED)
950 continue;
951 cbp = g_clone_bio(bp);
952 if (cbp == NULL)
953 goto failure;
954 cbp->bio_caller1 = sd;
955 bioq_insert_tail(&queue, cbp);
956 }
957 for (cbp = bioq_first(&queue); cbp != NULL;
958 cbp = bioq_first(&queue)) {
959 bioq_remove(&queue, cbp);
960 sd = cbp->bio_caller1;
961 cbp->bio_caller1 = NULL;
962 g_raid_subdisk_iostart(sd, cbp);
963 }
964 return;
965 failure:
966 for (cbp = bioq_first(&queue); cbp != NULL;
967 cbp = bioq_first(&queue)) {
968 bioq_remove(&queue, cbp);
969 g_destroy_bio(cbp);
970 }
971 if (bp->bio_error == 0)
972 bp->bio_error = ENOMEM;
973 g_raid_iodone(bp, bp->bio_error);
974 }
975
976 static void
977 g_raid_tr_kerneldump_common_done(struct bio *bp)
978 {
979
980 bp->bio_flags |= BIO_DONE;
981 }
982
983 int
984 g_raid_tr_kerneldump_common(struct g_raid_tr_object *tr,
985 void *virtual, vm_offset_t physical, off_t offset, size_t length)
986 {
987 struct g_raid_softc *sc;
988 struct g_raid_volume *vol;
989 struct bio bp;
990
991 vol = tr->tro_volume;
992 sc = vol->v_softc;
993
994 bzero(&bp, sizeof(bp));
995 bp.bio_cmd = BIO_WRITE;
996 bp.bio_done = g_raid_tr_kerneldump_common_done;
997 bp.bio_attribute = NULL;
998 bp.bio_offset = offset;
999 bp.bio_length = length;
1000 bp.bio_data = virtual;
1001 bp.bio_to = vol->v_provider;
1002
1003 g_raid_start(&bp);
1004 while (!(bp.bio_flags & BIO_DONE)) {
1005 G_RAID_DEBUG1(4, sc, "Poll...");
1006 g_raid_poll(sc);
1007 DELAY(10);
1008 }
1009
1010 return (bp.bio_error != 0 ? EIO : 0);
1011 }
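/*
 * While dumping no other threads run, so the request injected above
 * via g_raid_start() is completed by calling g_raid_poll()
 * synchronously instead of waking the worker thread.
 */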
1012
1013 static int
1014 g_raid_dump(void *arg,
1015 void *virtual, vm_offset_t physical, off_t offset, size_t length)
1016 {
1017 struct g_raid_volume *vol;
1018 int error;
1019
1020 vol = (struct g_raid_volume *)arg;
1021 G_RAID_DEBUG1(3, vol->v_softc, "Dumping at off %llu len %llu.",
1022 (long long unsigned)offset, (long long unsigned)length);
1023
1024 error = G_RAID_TR_KERNELDUMP(vol->v_tr,
1025 virtual, physical, offset, length);
1026 return (error);
1027 }
1028
1029 static void
1030 g_raid_kerneldump(struct g_raid_softc *sc, struct bio *bp)
1031 {
1032 struct g_kerneldump *gkd;
1033 struct g_provider *pp;
1034 struct g_raid_volume *vol;
1035
1036 gkd = (struct g_kerneldump*)bp->bio_data;
1037 pp = bp->bio_to;
1038 vol = pp->private;
1039 g_trace(G_T_TOPOLOGY, "g_raid_kerneldump(%s, %jd, %jd)",
1040 pp->name, (intmax_t)gkd->offset, (intmax_t)gkd->length);
1041 gkd->di.dumper = g_raid_dump;
1042 gkd->di.priv = vol;
1043 gkd->di.blocksize = vol->v_sectorsize;
1044 gkd->di.maxiosize = DFLTPHYS;
1045 gkd->di.mediaoffset = gkd->offset;
1046 if ((gkd->offset + gkd->length) > vol->v_mediasize)
1047 gkd->length = vol->v_mediasize - gkd->offset;
1048 gkd->di.mediasize = gkd->length;
1049 g_io_deliver(bp, 0);
1050 }
1051
1052 static void
1053 g_raid_start(struct bio *bp)
1054 {
1055 struct g_raid_softc *sc;
1056
1057 sc = bp->bio_to->geom->softc;
1058 /*
1059 * If sc == NULL or there are no valid disks, provider's error
1060 * should be set and g_raid_start() should not be called at all.
1061 */
1062 // KASSERT(sc != NULL && sc->sc_state == G_RAID_VOLUME_S_RUNNING,
1063 // ("Provider's error should be set (error=%d)(mirror=%s).",
1064 // bp->bio_to->error, bp->bio_to->name));
1065 G_RAID_LOGREQ(3, bp, "Request received.");
1066
1067 switch (bp->bio_cmd) {
1068 case BIO_READ:
1069 case BIO_WRITE:
1070 case BIO_DELETE:
1071 case BIO_FLUSH:
1072 break;
1073 case BIO_GETATTR:
1074 if (!strcmp(bp->bio_attribute, "GEOM::kerneldump"))
1075 g_raid_kerneldump(sc, bp);
1076 else
1077 g_io_deliver(bp, EOPNOTSUPP);
1078 return;
1079 default:
1080 g_io_deliver(bp, EOPNOTSUPP);
1081 return;
1082 }
1083 mtx_lock(&sc->sc_queue_mtx);
1084 bioq_disksort(&sc->sc_queue, bp);
1085 mtx_unlock(&sc->sc_queue_mtx);
1086 if (!dumping) {
1087 G_RAID_DEBUG1(4, sc, "Waking up %p.", sc);
1088 wakeup(sc);
1089 }
1090 }
1091
1092 static int
1093 g_raid_bio_overlaps(const struct bio *bp, off_t lstart, off_t len)
1094 {
1095 /*
1096 * 5 cases:
1097 * (1) bp entirely below NO
1098 * (2) bp entirely above NO
1099 * (3) bp start below, but end in range YES
1100 * (4) bp entirely within YES
1101 * (5) bp starts within, ends above YES
1102 *
1103 * lock range 10-19 (offset 10 length 10)
1104 * (1) 1-5: first if kicks it out
1105 * (2) 30-35: second if kicks it out
1106 * (3) 5-15: passes both ifs
1107 * (4) 12-14: passes both ifs
1108 * (5) 19-20: passes both
1109 */
1110 off_t lend = lstart + len - 1;
1111 off_t bstart = bp->bio_offset;
1112 off_t bend = bp->bio_offset + bp->bio_length - 1;
1113
1114 if (bend < lstart)
1115 return (0);
1116 if (lend < bstart)
1117 return (0);
1118 return (1);
1119 }
1120
1121 static int
1122 g_raid_is_in_locked_range(struct g_raid_volume *vol, const struct bio *bp)
1123 {
1124 struct g_raid_lock *lp;
1125
1126 sx_assert(&vol->v_softc->sc_lock, SX_LOCKED);
1127
1128 LIST_FOREACH(lp, &vol->v_locks, l_next) {
1129 if (g_raid_bio_overlaps(bp, lp->l_offset, lp->l_length))
1130 return (1);
1131 }
1132 return (0);
1133 }
1134
1135 static void
1136 g_raid_start_request(struct bio *bp)
1137 {
1138 struct g_raid_softc *sc;
1139 struct g_raid_volume *vol;
1140
1141 sc = bp->bio_to->geom->softc;
1142 sx_assert(&sc->sc_lock, SX_LOCKED);
1143 vol = bp->bio_to->private;
1144
1145 /*
1146 * Check to see if this item is in a locked range. If so,
1147 * queue it to our locked queue and return. We'll requeue
1148 * it when the range is unlocked. Internal I/O for the
1149 * rebuild/rescan/recovery process is excluded from this
1150 * check so we can actually do the recovery.
1151 */
1152 if (!(bp->bio_cflags & G_RAID_BIO_FLAG_SPECIAL) &&
1153 g_raid_is_in_locked_range(vol, bp)) {
1154 G_RAID_LOGREQ(3, bp, "Defer request.");
1155 bioq_insert_tail(&vol->v_locked, bp);
1156 return;
1157 }
1158
1159 /*
1160 * If we're actually going to do the write/delete, then
1161 * update the idle stats for the volume.
1162 */
1163 if (bp->bio_cmd == BIO_WRITE || bp->bio_cmd == BIO_DELETE) {
1164 if (!vol->v_dirty)
1165 g_raid_dirty(vol);
1166 vol->v_writes++;
1167 }
1168
1169 /*
1170 * Put request onto inflight queue, so we can check if new
1171 * synchronization requests don't collide with it. Then tell
1172 * the transformation layer to start the I/O.
1173 */
1174 bioq_insert_tail(&vol->v_inflight, bp);
1175 G_RAID_LOGREQ(4, bp, "Request started");
1176 G_RAID_TR_IOSTART(vol->v_tr, bp);
1177 }
1178
1179 static void
1180 g_raid_finish_with_locked_ranges(struct g_raid_volume *vol, struct bio *bp)
1181 {
1182 off_t off, len;
1183 struct bio *nbp;
1184 struct g_raid_lock *lp;
1185
1186 vol->v_pending_lock = 0;
1187 LIST_FOREACH(lp, &vol->v_locks, l_next) {
1188 if (lp->l_pending) {
1189 off = lp->l_offset;
1190 len = lp->l_length;
1191 lp->l_pending = 0;
1192 TAILQ_FOREACH(nbp, &vol->v_inflight.queue, bio_queue) {
1193 if (g_raid_bio_overlaps(nbp, off, len))
1194 lp->l_pending++;
1195 }
1196 if (lp->l_pending) {
1197 vol->v_pending_lock = 1;
1198 G_RAID_DEBUG1(4, vol->v_softc,
1199 "Deferred lock(%jd, %jd) has %d pending",
1200 (intmax_t)off, (intmax_t)(off + len),
1201 lp->l_pending);
1202 continue;
1203 }
1204 G_RAID_DEBUG1(4, vol->v_softc,
1205 "Deferred lock of %jd to %jd completed",
1206 (intmax_t)off, (intmax_t)(off + len));
1207 G_RAID_TR_LOCKED(vol->v_tr, lp->l_callback_arg);
1208 }
1209 }
1210 }
1211
1212 void
1213 g_raid_iodone(struct bio *bp, int error)
1214 {
1215 struct g_raid_softc *sc;
1216 struct g_raid_volume *vol;
1217
1218 sc = bp->bio_to->geom->softc;
1219 sx_assert(&sc->sc_lock, SX_LOCKED);
1220 vol = bp->bio_to->private;
1221 G_RAID_LOGREQ(3, bp, "Request done: %d.", error);
1222
1223 	/* Update stats if we have done a write/delete. */
1224 if (bp->bio_cmd == BIO_WRITE || bp->bio_cmd == BIO_DELETE) {
1225 vol->v_writes--;
1226 vol->v_last_write = time_uptime;
1227 }
1228
1229 bioq_remove(&vol->v_inflight, bp);
1230 if (vol->v_pending_lock && g_raid_is_in_locked_range(vol, bp))
1231 g_raid_finish_with_locked_ranges(vol, bp);
1232 getmicrouptime(&vol->v_last_done);
1233 g_io_deliver(bp, error);
1234 }
1235
1236 int
1237 g_raid_lock_range(struct g_raid_volume *vol, off_t off, off_t len,
1238 struct bio *ignore, void *argp)
1239 {
1240 struct g_raid_softc *sc;
1241 struct g_raid_lock *lp;
1242 struct bio *bp;
1243
1244 sc = vol->v_softc;
1245 lp = malloc(sizeof(*lp), M_RAID, M_WAITOK | M_ZERO);
1246 LIST_INSERT_HEAD(&vol->v_locks, lp, l_next);
1247 lp->l_offset = off;
1248 lp->l_length = len;
1249 lp->l_callback_arg = argp;
1250
1251 lp->l_pending = 0;
1252 TAILQ_FOREACH(bp, &vol->v_inflight.queue, bio_queue) {
1253 if (bp != ignore && g_raid_bio_overlaps(bp, off, len))
1254 lp->l_pending++;
1255 }
1256
1257 /*
1258 * If there are any writes that are pending, we return EBUSY. All
1259 * callers will have to wait until all pending writes clear.
1260 */
1261 if (lp->l_pending > 0) {
1262 vol->v_pending_lock = 1;
1263 G_RAID_DEBUG1(4, sc, "Locking range %jd to %jd deferred %d pend",
1264 (intmax_t)off, (intmax_t)(off+len), lp->l_pending);
1265 return (EBUSY);
1266 }
1267 G_RAID_DEBUG1(4, sc, "Locking range %jd to %jd",
1268 (intmax_t)off, (intmax_t)(off+len));
1269 G_RAID_TR_LOCKED(vol->v_tr, lp->l_callback_arg);
1270 return (0);
1271 }
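/*
 * Typical transformation-module usage: lock the range being rebuilt,
 * then continue from the G_RAID_TR_LOCKED() callback, which fires
 * immediately on success (return value 0) or later, once overlapping
 * in-flight requests drain (return value EBUSY).
 */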
1272
1273 int
1274 g_raid_unlock_range(struct g_raid_volume *vol, off_t off, off_t len)
1275 {
1276 struct g_raid_lock *lp;
1277 struct g_raid_softc *sc;
1278 struct bio *bp;
1279
1280 sc = vol->v_softc;
1281 LIST_FOREACH(lp, &vol->v_locks, l_next) {
1282 if (lp->l_offset == off && lp->l_length == len) {
1283 LIST_REMOVE(lp, l_next);
1284 /* XXX
1285 * Right now we just put them all back on the queue
1286 * and hope for the best. We hope this because any
1287 * locked ranges will go right back on this list
1288 * when the worker thread runs.
1289 * XXX
1290 */
1291 G_RAID_DEBUG1(4, sc, "Unlocked %jd to %jd",
1292 (intmax_t)lp->l_offset,
1293 (intmax_t)(lp->l_offset+lp->l_length));
1294 mtx_lock(&sc->sc_queue_mtx);
1295 while ((bp = bioq_takefirst(&vol->v_locked)) != NULL)
1296 bioq_disksort(&sc->sc_queue, bp);
1297 mtx_unlock(&sc->sc_queue_mtx);
1298 free(lp, M_RAID);
1299 return (0);
1300 }
1301 }
1302 return (EINVAL);
1303 }
1304
1305 void
1306 g_raid_subdisk_iostart(struct g_raid_subdisk *sd, struct bio *bp)
1307 {
1308 struct g_consumer *cp;
1309 struct g_raid_disk *disk, *tdisk;
1310
1311 bp->bio_caller1 = sd;
1312
1313 /*
1314 	 * Make sure that the disk is present.  Generally it is the task of
1315 	 * transformation layers not to send requests to absent disks, but
1316 	 * it is better to be safe than sorry and report the situation.
1317 */
1318 if (sd->sd_disk == NULL) {
1319 G_RAID_LOGREQ(0, bp, "Warning! I/O request to an absent disk!");
1320 nodisk:
1321 bp->bio_from = NULL;
1322 bp->bio_to = NULL;
1323 bp->bio_error = ENXIO;
1324 g_raid_disk_done(bp);
1325 return;
1326 }
1327 disk = sd->sd_disk;
1328 if (disk->d_state != G_RAID_DISK_S_ACTIVE &&
1329 disk->d_state != G_RAID_DISK_S_FAILED) {
1330 G_RAID_LOGREQ(0, bp, "Warning! I/O request to a disk in a "
1331 "wrong state (%s)!", g_raid_disk_state2str(disk->d_state));
1332 goto nodisk;
1333 }
1334
1335 cp = disk->d_consumer;
1336 bp->bio_from = cp;
1337 bp->bio_to = cp->provider;
1338 cp->index++;
1339
1340 	/* Update average disk load (EWMA: 1/8 current queue depth, 7/8 history). */
1341 TAILQ_FOREACH(tdisk, &sd->sd_softc->sc_disks, d_next) {
1342 if (tdisk->d_consumer == NULL)
1343 tdisk->d_load = 0;
1344 else
1345 tdisk->d_load = (tdisk->d_consumer->index *
1346 G_RAID_SUBDISK_LOAD_SCALE + tdisk->d_load * 7) / 8;
1347 }
1348
1349 disk->d_last_offset = bp->bio_offset + bp->bio_length;
1350 if (dumping) {
1351 G_RAID_LOGREQ(3, bp, "Sending dumping request.");
1352 if (bp->bio_cmd == BIO_WRITE) {
1353 bp->bio_error = g_raid_subdisk_kerneldump(sd,
1354 bp->bio_data, 0, bp->bio_offset, bp->bio_length);
1355 } else
1356 bp->bio_error = EOPNOTSUPP;
1357 g_raid_disk_done(bp);
1358 } else {
1359 bp->bio_done = g_raid_disk_done;
1360 bp->bio_offset += sd->sd_offset;
1361 G_RAID_LOGREQ(3, bp, "Sending request.");
1362 g_io_request(bp, cp);
1363 }
1364 }
1365
1366 int
1367 g_raid_subdisk_kerneldump(struct g_raid_subdisk *sd,
1368 void *virtual, vm_offset_t physical, off_t offset, size_t length)
1369 {
1370
1371 if (sd->sd_disk == NULL)
1372 return (ENXIO);
1373 if (sd->sd_disk->d_kd.di.dumper == NULL)
1374 return (EOPNOTSUPP);
1375 return (dump_write(&sd->sd_disk->d_kd.di,
1376 virtual, physical,
1377 sd->sd_disk->d_kd.di.mediaoffset + sd->sd_offset + offset,
1378 length));
1379 }
1380
1381 static void
1382 g_raid_disk_done(struct bio *bp)
1383 {
1384 struct g_raid_softc *sc;
1385 struct g_raid_subdisk *sd;
1386
1387 sd = bp->bio_caller1;
1388 sc = sd->sd_softc;
1389 mtx_lock(&sc->sc_queue_mtx);
1390 bioq_disksort(&sc->sc_queue, bp);
1391 mtx_unlock(&sc->sc_queue_mtx);
1392 if (!dumping)
1393 wakeup(sc);
1394 }
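/*
 * First completion stage: the bio is done from g_io_request()'s point
 * of view and is queued back to the worker thread, which finishes it
 * in g_raid_disk_done_request() under sc_lock.
 */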
1395
1396 static void
1397 g_raid_disk_done_request(struct bio *bp)
1398 {
1399 struct g_raid_softc *sc;
1400 struct g_raid_disk *disk;
1401 struct g_raid_subdisk *sd;
1402 struct g_raid_volume *vol;
1403
1404 g_topology_assert_not();
1405
1406 G_RAID_LOGREQ(3, bp, "Disk request done: %d.", bp->bio_error);
1407 sd = bp->bio_caller1;
1408 sc = sd->sd_softc;
1409 vol = sd->sd_volume;
1410 if (bp->bio_from != NULL) {
1411 bp->bio_from->index--;
1412 disk = bp->bio_from->private;
1413 if (disk == NULL)
1414 g_raid_kill_consumer(sc, bp->bio_from);
1415 }
1416 bp->bio_offset -= sd->sd_offset;
1417
1418 G_RAID_TR_IODONE(vol->v_tr, sd, bp);
1419 }
1420
1421 static void
1422 g_raid_handle_event(struct g_raid_softc *sc, struct g_raid_event *ep)
1423 {
1424
1425 if ((ep->e_flags & G_RAID_EVENT_VOLUME) != 0)
1426 ep->e_error = g_raid_update_volume(ep->e_tgt, ep->e_event);
1427 else if ((ep->e_flags & G_RAID_EVENT_DISK) != 0)
1428 ep->e_error = g_raid_update_disk(ep->e_tgt, ep->e_event);
1429 else if ((ep->e_flags & G_RAID_EVENT_SUBDISK) != 0)
1430 ep->e_error = g_raid_update_subdisk(ep->e_tgt, ep->e_event);
1431 else
1432 ep->e_error = g_raid_update_node(ep->e_tgt, ep->e_event);
1433 if ((ep->e_flags & G_RAID_EVENT_WAIT) == 0) {
1434 KASSERT(ep->e_error == 0,
1435 ("Error cannot be handled."));
1436 g_raid_event_free(ep);
1437 } else {
1438 ep->e_flags |= G_RAID_EVENT_DONE;
1439 G_RAID_DEBUG1(4, sc, "Waking up %p.", ep);
1440 mtx_lock(&sc->sc_queue_mtx);
1441 wakeup(ep);
1442 mtx_unlock(&sc->sc_queue_mtx);
1443 }
1444 }
1445
1446 /*
1447 * Worker thread.
1448 */
1449 static void
1450 g_raid_worker(void *arg)
1451 {
1452 struct g_raid_softc *sc;
1453 struct g_raid_event *ep;
1454 struct g_raid_volume *vol;
1455 struct bio *bp;
1456 struct timeval now, t;
1457 int timeout, rv;
1458
1459 sc = arg;
1460 thread_lock(curthread);
1461 sched_prio(curthread, PRIBIO);
1462 thread_unlock(curthread);
1463
1464 sx_xlock(&sc->sc_lock);
1465 for (;;) {
1466 mtx_lock(&sc->sc_queue_mtx);
1467 /*
1468 * First take a look at events.
1469 * This is important to handle events before any I/O requests.
1470 */
1471 bp = NULL;
1472 vol = NULL;
1473 rv = 0;
1474 ep = TAILQ_FIRST(&sc->sc_events);
1475 if (ep != NULL)
1476 TAILQ_REMOVE(&sc->sc_events, ep, e_next);
1477 else if ((bp = bioq_takefirst(&sc->sc_queue)) != NULL)
1478 ;
1479 else {
1480 getmicrouptime(&now);
1481 t = now;
1482 TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) {
1483 if (bioq_first(&vol->v_inflight) == NULL &&
1484 vol->v_tr &&
1485 timevalcmp(&vol->v_last_done, &t, < ))
1486 t = vol->v_last_done;
1487 }
1488 timevalsub(&t, &now);
1489 timeout = g_raid_idle_threshold +
1490 t.tv_sec * 1000000 + t.tv_usec;
1491 if (timeout > 0) {
1492 /*
1493 * Two steps to avoid overflows at HZ=1000
1494 * and idle timeouts > 2.1s. Some rounding
1495 				 * errors can occur, but they are < 1 tick,
1496 * which is deemed to be close enough for
1497 * this purpose.
1498 */
1499 int micpertic = 1000000 / hz;
1500 timeout = (timeout + micpertic - 1) / micpertic;
1501 sx_xunlock(&sc->sc_lock);
1502 MSLEEP(rv, sc, &sc->sc_queue_mtx,
1503 PRIBIO | PDROP, "-", timeout);
1504 sx_xlock(&sc->sc_lock);
1505 goto process;
1506 } else
1507 rv = EWOULDBLOCK;
1508 }
1509 mtx_unlock(&sc->sc_queue_mtx);
1510 process:
1511 if (ep != NULL) {
1512 g_raid_handle_event(sc, ep);
1513 } else if (bp != NULL) {
1514 if (bp->bio_to != NULL &&
1515 bp->bio_to->geom == sc->sc_geom)
1516 g_raid_start_request(bp);
1517 else
1518 g_raid_disk_done_request(bp);
1519 } else if (rv == EWOULDBLOCK) {
1520 TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) {
1521 if (vol->v_writes == 0 && vol->v_dirty)
1522 g_raid_clean(vol, -1);
1523 if (bioq_first(&vol->v_inflight) == NULL &&
1524 vol->v_tr) {
1525 t.tv_sec = g_raid_idle_threshold / 1000000;
1526 t.tv_usec = g_raid_idle_threshold % 1000000;
1527 timevaladd(&t, &vol->v_last_done);
1528 getmicrouptime(&now);
1529 if (timevalcmp(&t, &now, <= )) {
1530 G_RAID_TR_IDLE(vol->v_tr);
1531 vol->v_last_done = now;
1532 }
1533 }
1534 }
1535 }
1536 if (sc->sc_stopping == G_RAID_DESTROY_HARD)
1537 g_raid_destroy_node(sc, 1); /* May not return. */
1538 }
1539 }
1540
1541 static void
1542 g_raid_poll(struct g_raid_softc *sc)
1543 {
1544 struct g_raid_event *ep;
1545 struct bio *bp;
1546
1547 sx_xlock(&sc->sc_lock);
1548 mtx_lock(&sc->sc_queue_mtx);
1549 /*
1550 * First take a look at events.
1551 * This is important to handle events before any I/O requests.
1552 */
1553 ep = TAILQ_FIRST(&sc->sc_events);
1554 if (ep != NULL) {
1555 TAILQ_REMOVE(&sc->sc_events, ep, e_next);
1556 mtx_unlock(&sc->sc_queue_mtx);
1557 g_raid_handle_event(sc, ep);
1558 goto out;
1559 }
1560 bp = bioq_takefirst(&sc->sc_queue);
1561 if (bp != NULL) {
1562 mtx_unlock(&sc->sc_queue_mtx);
1563 if (bp->bio_from == NULL ||
1564 bp->bio_from->geom != sc->sc_geom)
1565 g_raid_start_request(bp);
1566 else
1567 g_raid_disk_done_request(bp);
1568 }
1569 out:
1570 sx_xunlock(&sc->sc_lock);
1571 }
1572
1573 static void
1574 g_raid_launch_provider(struct g_raid_volume *vol)
1575 {
1576 struct g_raid_disk *disk;
1577 struct g_raid_softc *sc;
1578 struct g_provider *pp;
1579 char name[G_RAID_MAX_VOLUMENAME];
1580 off_t off;
1581
1582 sc = vol->v_softc;
1583 sx_assert(&sc->sc_lock, SX_LOCKED);
1584
1585 g_topology_lock();
1586 /* Try to name provider with volume name. */
1587 snprintf(name, sizeof(name), "raid/%s", vol->v_name);
1588 if (g_raid_name_format == 0 || vol->v_name[0] == 0 ||
1589 g_provider_by_name(name) != NULL) {
1590 /* Otherwise use sequential volume number. */
1591 snprintf(name, sizeof(name), "raid/r%d", vol->v_global_id);
1592 }
1593 pp = g_new_providerf(sc->sc_geom, "%s", name);
1594 pp->private = vol;
1595 pp->mediasize = vol->v_mediasize;
1596 pp->sectorsize = vol->v_sectorsize;
1597 pp->stripesize = 0;
1598 pp->stripeoffset = 0;
1599 if (vol->v_raid_level == G_RAID_VOLUME_RL_RAID1 ||
1600 vol->v_raid_level == G_RAID_VOLUME_RL_RAID3 ||
1601 vol->v_raid_level == G_RAID_VOLUME_RL_SINGLE ||
1602 vol->v_raid_level == G_RAID_VOLUME_RL_CONCAT) {
1603 if ((disk = vol->v_subdisks[0].sd_disk) != NULL &&
1604 disk->d_consumer != NULL &&
1605 disk->d_consumer->provider != NULL) {
1606 pp->stripesize = disk->d_consumer->provider->stripesize;
1607 off = disk->d_consumer->provider->stripeoffset;
1608 pp->stripeoffset = off + vol->v_subdisks[0].sd_offset;
1609 if (off > 0)
1610 pp->stripeoffset %= off;
1611 }
1612 if (vol->v_raid_level == G_RAID_VOLUME_RL_RAID3) {
1613 pp->stripesize *= (vol->v_disks_count - 1);
1614 pp->stripeoffset *= (vol->v_disks_count - 1);
1615 }
1616 } else
1617 pp->stripesize = vol->v_strip_size;
1618 vol->v_provider = pp;
1619 g_error_provider(pp, 0);
1620 g_topology_unlock();
1621 G_RAID_DEBUG1(0, sc, "Provider %s for volume %s created.",
1622 pp->name, vol->v_name);
1623 }
1624
1625 static void
1626 g_raid_destroy_provider(struct g_raid_volume *vol)
1627 {
1628 struct g_raid_softc *sc;
1629 struct g_provider *pp;
1630 struct bio *bp, *tmp;
1631
1632 g_topology_assert_not();
1633 sc = vol->v_softc;
1634 pp = vol->v_provider;
1635 KASSERT(pp != NULL, ("NULL provider (volume=%s).", vol->v_name));
1636
1637 g_topology_lock();
1638 g_error_provider(pp, ENXIO);
1639 mtx_lock(&sc->sc_queue_mtx);
1640 TAILQ_FOREACH_SAFE(bp, &sc->sc_queue.queue, bio_queue, tmp) {
1641 if (bp->bio_to != pp)
1642 continue;
1643 bioq_remove(&sc->sc_queue, bp);
1644 g_io_deliver(bp, ENXIO);
1645 }
1646 mtx_unlock(&sc->sc_queue_mtx);
1647 G_RAID_DEBUG1(0, sc, "Provider %s for volume %s destroyed.",
1648 pp->name, vol->v_name);
1649 g_wither_provider(pp, ENXIO);
1650 g_topology_unlock();
1651 vol->v_provider = NULL;
1652 }
1653
1654 /*
1655  * Update volume state.
1656 */
1657 static int
1658 g_raid_update_volume(struct g_raid_volume *vol, u_int event)
1659 {
1660 struct g_raid_softc *sc;
1661
1662 sc = vol->v_softc;
1663 sx_assert(&sc->sc_lock, SX_XLOCKED);
1664
1665 G_RAID_DEBUG1(2, sc, "Event %s for volume %s.",
1666 g_raid_volume_event2str(event),
1667 vol->v_name);
1668 switch (event) {
1669 case G_RAID_VOLUME_E_DOWN:
1670 if (vol->v_provider != NULL)
1671 g_raid_destroy_provider(vol);
1672 break;
1673 case G_RAID_VOLUME_E_UP:
1674 if (vol->v_provider == NULL)
1675 g_raid_launch_provider(vol);
1676 break;
1677 case G_RAID_VOLUME_E_START:
1678 if (vol->v_tr)
1679 G_RAID_TR_START(vol->v_tr);
1680 return (0);
1681 default:
1682 if (sc->sc_md)
1683 G_RAID_MD_VOLUME_EVENT(sc->sc_md, vol, event);
1684 return (0);
1685 }
1686
1687 /* Manage root mount release. */
1688 if (vol->v_starting) {
1689 vol->v_starting = 0;
1690 G_RAID_DEBUG1(1, sc, "root_mount_rel %p", vol->v_rootmount);
1691 root_mount_rel(vol->v_rootmount);
1692 vol->v_rootmount = NULL;
1693 }
1694 if (vol->v_stopping && vol->v_provider_open == 0)
1695 g_raid_destroy_volume(vol);
1696 return (0);
1697 }
1698
1699 /*
1700 * Update subdisk state.
1701 */
1702 static int
1703 g_raid_update_subdisk(struct g_raid_subdisk *sd, u_int event)
1704 {
1705 struct g_raid_softc *sc;
1706 struct g_raid_volume *vol;
1707
1708 sc = sd->sd_softc;
1709 vol = sd->sd_volume;
1710 sx_assert(&sc->sc_lock, SX_XLOCKED);
1711
1712 G_RAID_DEBUG1(2, sc, "Event %s for subdisk %s:%d-%s.",
1713 g_raid_subdisk_event2str(event),
1714 vol->v_name, sd->sd_pos,
1715 sd->sd_disk ? g_raid_get_diskname(sd->sd_disk) : "[none]");
1716 if (vol->v_tr)
1717 G_RAID_TR_EVENT(vol->v_tr, sd, event);
1718
1719 return (0);
1720 }
1721
1722 /*
1723 * Update disk state.
1724 */
1725 static int
1726 g_raid_update_disk(struct g_raid_disk *disk, u_int event)
1727 {
1728 struct g_raid_softc *sc;
1729
1730 sc = disk->d_softc;
1731 sx_assert(&sc->sc_lock, SX_XLOCKED);
1732
1733 G_RAID_DEBUG1(2, sc, "Event %s for disk %s.",
1734 g_raid_disk_event2str(event),
1735 g_raid_get_diskname(disk));
1736
1737 if (sc->sc_md)
1738 G_RAID_MD_EVENT(sc->sc_md, disk, event);
1739 return (0);
1740 }
1741
1742 /*
1743 * Node event.
1744 */
1745 static int
1746 g_raid_update_node(struct g_raid_softc *sc, u_int event)
1747 {
1748 sx_assert(&sc->sc_lock, SX_XLOCKED);
1749
1750 G_RAID_DEBUG1(2, sc, "Event %s for the array.",
1751 g_raid_node_event2str(event));
1752
1753 if (event == G_RAID_NODE_E_WAKE)
1754 return (0);
1755 if (sc->sc_md)
1756 G_RAID_MD_EVENT(sc->sc_md, NULL, event);
1757 return (0);
1758 }
1759
1760 static int
1761 g_raid_access(struct g_provider *pp, int acr, int acw, int ace)
1762 {
1763 struct g_raid_volume *vol;
1764 struct g_raid_softc *sc;
1765 int dcw, opens, error = 0;
1766
1767 g_topology_assert();
1768 sc = pp->geom->softc;
1769 vol = pp->private;
1770 KASSERT(sc != NULL, ("NULL softc (provider=%s).", pp->name));
1771 KASSERT(vol != NULL, ("NULL volume (provider=%s).", pp->name));
1772
1773 G_RAID_DEBUG1(2, sc, "Access request for %s: r%dw%de%d.", pp->name,
1774 acr, acw, ace);
1775 dcw = pp->acw + acw;
1776
1777 g_topology_unlock();
1778 sx_xlock(&sc->sc_lock);
1779 /* Deny new opens while dying. */
1780 if (sc->sc_stopping != 0 && (acr > 0 || acw > 0 || ace > 0)) {
1781 error = ENXIO;
1782 goto out;
1783 }
1784 if (dcw == 0 && vol->v_dirty)
1785 g_raid_clean(vol, dcw);
1786 vol->v_provider_open += acr + acw + ace;
1787 /* Handle delayed node destruction. */
1788 if (sc->sc_stopping == G_RAID_DESTROY_DELAYED &&
1789 vol->v_provider_open == 0) {
1790 /* Count open volumes. */
1791 opens = g_raid_nopens(sc);
1792 if (opens == 0) {
1793 sc->sc_stopping = G_RAID_DESTROY_HARD;
1794 /* Wake up worker to make it selfdestruct. */
1795 g_raid_event_send(sc, G_RAID_NODE_E_WAKE, 0);
1796 }
1797 }
1798 /* Handle open volume destruction. */
1799 if (vol->v_stopping && vol->v_provider_open == 0)
1800 g_raid_destroy_volume(vol);
1801 out:
1802 sx_xunlock(&sc->sc_lock);
1803 g_topology_lock();
1804 return (error);
1805 }
1806
1807 struct g_raid_softc *
1808 g_raid_create_node(struct g_class *mp,
1809 const char *name, struct g_raid_md_object *md)
1810 {
1811 struct g_raid_softc *sc;
1812 struct g_geom *gp;
1813 int error;
1814
1815 g_topology_assert();
1816 G_RAID_DEBUG(1, "Creating array %s.", name);
1817
1818 gp = g_new_geomf(mp, "%s", name);
1819 sc = malloc(sizeof(*sc), M_RAID, M_WAITOK | M_ZERO);
1820 gp->start = g_raid_start;
1821 gp->orphan = g_raid_orphan;
1822 gp->access = g_raid_access;
1823 gp->dumpconf = g_raid_dumpconf;
1824
1825 sc->sc_md = md;
1826 sc->sc_geom = gp;
1827 sc->sc_flags = 0;
1828 TAILQ_INIT(&sc->sc_volumes);
1829 TAILQ_INIT(&sc->sc_disks);
1830 sx_init(&sc->sc_lock, "graid:lock");
1831 mtx_init(&sc->sc_queue_mtx, "graid:queue", NULL, MTX_DEF);
1832 TAILQ_INIT(&sc->sc_events);
1833 bioq_init(&sc->sc_queue);
1834 gp->softc = sc;
1835 error = kproc_create(g_raid_worker, sc, &sc->sc_worker, 0, 0,
1836 "g_raid %s", name);
1837 if (error != 0) {
1838 G_RAID_DEBUG(0, "Cannot create kernel thread for %s.", name);
1839 mtx_destroy(&sc->sc_queue_mtx);
1840 sx_destroy(&sc->sc_lock);
1841 g_destroy_geom(sc->sc_geom);
1842 free(sc, M_RAID);
1843 return (NULL);
1844 }
1845
1846 G_RAID_DEBUG1(0, sc, "Array %s created.", name);
1847 return (sc);
1848 }
1849
1850 struct g_raid_volume *
1851 g_raid_create_volume(struct g_raid_softc *sc, const char *name, int id)
1852 {
1853 struct g_raid_volume *vol, *vol1;
1854 int i;
1855
1856 G_RAID_DEBUG1(1, sc, "Creating volume %s.", name);
1857 vol = malloc(sizeof(*vol), M_RAID, M_WAITOK | M_ZERO);
1858 vol->v_softc = sc;
1859 strlcpy(vol->v_name, name, G_RAID_MAX_VOLUMENAME);
1860 vol->v_state = G_RAID_VOLUME_S_STARTING;
1861 vol->v_raid_level = G_RAID_VOLUME_RL_UNKNOWN;
1862 vol->v_raid_level_qualifier = G_RAID_VOLUME_RLQ_UNKNOWN;
1863 vol->v_rotate_parity = 1;
1864 bioq_init(&vol->v_inflight);
1865 bioq_init(&vol->v_locked);
1866 LIST_INIT(&vol->v_locks);
1867 for (i = 0; i < G_RAID_MAX_SUBDISKS; i++) {
1868 vol->v_subdisks[i].sd_softc = sc;
1869 vol->v_subdisks[i].sd_volume = vol;
1870 vol->v_subdisks[i].sd_pos = i;
1871 		vol->v_subdisks[i].sd_state = G_RAID_SUBDISK_S_NONE;
1872 }
1873
1874 /* Find free ID for this volume. */
1875 g_topology_lock();
1876 vol1 = vol;
1877 if (id >= 0) {
1878 LIST_FOREACH(vol1, &g_raid_volumes, v_global_next) {
1879 if (vol1->v_global_id == id)
1880 break;
1881 }
1882 }
1883 if (vol1 != NULL) {
1884 for (id = 0; ; id++) {
1885 LIST_FOREACH(vol1, &g_raid_volumes, v_global_next) {
1886 if (vol1->v_global_id == id)
1887 break;
1888 }
1889 if (vol1 == NULL)
1890 break;
1891 }
1892 }
1893 vol->v_global_id = id;
1894 LIST_INSERT_HEAD(&g_raid_volumes, vol, v_global_next);
1895 g_topology_unlock();
1896
1897 /* Delay root mounting. */
1898 vol->v_rootmount = root_mount_hold("GRAID");
1899 G_RAID_DEBUG1(1, sc, "root_mount_hold %p", vol->v_rootmount);
1900 vol->v_starting = 1;
1901 TAILQ_INSERT_TAIL(&sc->sc_volumes, vol, v_next);
1902 return (vol);
1903 }
1904
1905 struct g_raid_disk *
1906 g_raid_create_disk(struct g_raid_softc *sc)
1907 {
1908 struct g_raid_disk *disk;
1909
1910 G_RAID_DEBUG1(1, sc, "Creating disk.");
1911 disk = malloc(sizeof(*disk), M_RAID, M_WAITOK | M_ZERO);
1912 disk->d_softc = sc;
1913 disk->d_state = G_RAID_DISK_S_NONE;
1914 TAILQ_INIT(&disk->d_subdisks);
1915 TAILQ_INSERT_TAIL(&sc->sc_disks, disk, d_next);
1916 return (disk);
1917 }
1918
1919 int g_raid_start_volume(struct g_raid_volume *vol)
1920 {
1921 struct g_raid_tr_class *class;
1922 struct g_raid_tr_object *obj;
1923 int status;
1924
1925 G_RAID_DEBUG1(2, vol->v_softc, "Starting volume %s.", vol->v_name);
1926 LIST_FOREACH(class, &g_raid_tr_classes, trc_list) {
1927 if (!class->trc_enable)
1928 continue;
1929 G_RAID_DEBUG1(2, vol->v_softc,
1930 "Tasting volume %s for %s transformation.",
1931 vol->v_name, class->name);
1932 obj = (void *)kobj_create((kobj_class_t)class, M_RAID,
1933 M_WAITOK);
1934 obj->tro_class = class;
1935 obj->tro_volume = vol;
1936 status = G_RAID_TR_TASTE(obj, vol);
1937 if (status != G_RAID_TR_TASTE_FAIL)
1938 break;
1939 kobj_delete((kobj_t)obj, M_RAID);
1940 }
1941 if (class == NULL) {
1942 G_RAID_DEBUG1(0, vol->v_softc,
1943 "No transformation module found for %s.",
1944 vol->v_name);
1945 vol->v_tr = NULL;
1946 g_raid_change_volume_state(vol, G_RAID_VOLUME_S_UNSUPPORTED);
1947 g_raid_event_send(vol, G_RAID_VOLUME_E_DOWN,
1948 G_RAID_EVENT_VOLUME);
1949 return (-1);
1950 }
1951 G_RAID_DEBUG1(2, vol->v_softc,
1952 "Transformation module %s chosen for %s.",
1953 class->name, vol->v_name);
1954 vol->v_tr = obj;
1955 return (0);
1956 }
1957
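/*
 * Final teardown of a node: destroy all volumes and disks, free the
 * metadata object and wither the geom.  If called from the worker
 * thread itself, also release the locks, free the softc and exit the
 * thread; otherwise just wake the worker so it can self-destruct.
 * Returns EBUSY if some volume or disk cannot be destroyed yet.
 */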
1958 int
1959 g_raid_destroy_node(struct g_raid_softc *sc, int worker)
1960 {
1961 struct g_raid_volume *vol, *tmpv;
1962 struct g_raid_disk *disk, *tmpd;
1963 int error = 0;
1964
1965 sc->sc_stopping = G_RAID_DESTROY_HARD;
1966 TAILQ_FOREACH_SAFE(vol, &sc->sc_volumes, v_next, tmpv) {
1967 if (g_raid_destroy_volume(vol))
1968 error = EBUSY;
1969 }
1970 if (error)
1971 return (error);
1972 TAILQ_FOREACH_SAFE(disk, &sc->sc_disks, d_next, tmpd) {
1973 if (g_raid_destroy_disk(disk))
1974 error = EBUSY;
1975 }
1976 if (error)
1977 return (error);
1978 if (sc->sc_md) {
1979 G_RAID_MD_FREE(sc->sc_md);
1980 kobj_delete((kobj_t)sc->sc_md, M_RAID);
1981 sc->sc_md = NULL;
1982 }
1983 if (sc->sc_geom != NULL) {
1984 G_RAID_DEBUG1(0, sc, "Array %s destroyed.", sc->sc_name);
1985 g_topology_lock();
1986 sc->sc_geom->softc = NULL;
1987 g_wither_geom(sc->sc_geom, ENXIO);
1988 g_topology_unlock();
1989 sc->sc_geom = NULL;
1990 } else
1991 G_RAID_DEBUG(1, "Array destroyed.");
1992 if (worker) {
1993 g_raid_event_cancel(sc, sc);
1994 mtx_destroy(&sc->sc_queue_mtx);
1995 sx_xunlock(&sc->sc_lock);
1996 sx_destroy(&sc->sc_lock);
1997 wakeup(&sc->sc_stopping);
1998 free(sc, M_RAID);
1999 curthread->td_pflags &= ~TDP_GEOM;
2000 G_RAID_DEBUG(1, "Thread exiting.");
2001 kproc_exit(0);
2002 } else {
2003 /* Wake up worker to make it self-destruct. */
2004 g_raid_event_send(sc, G_RAID_NODE_E_WAKE, 0);
2005 }
2006 return (0);
2007 }
2008
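/*
 * Destroy a volume.  Returns EBUSY while the transformation module is
 * still stopping, events are pending, or the provider still exists or
 * is open; otherwise detaches all subdisks, releases the global ID and
 * the root mount hold, and frees the volume.
 */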
2009 int
2010 g_raid_destroy_volume(struct g_raid_volume *vol)
2011 {
2012 struct g_raid_softc *sc;
2013 struct g_raid_disk *disk;
2014 int i;
2015
2016 sc = vol->v_softc;
2017 G_RAID_DEBUG1(2, sc, "Destroying volume %s.", vol->v_name);
2018 vol->v_stopping = 1;
2019 if (vol->v_state != G_RAID_VOLUME_S_STOPPED) {
2020 if (vol->v_tr) {
2021 G_RAID_TR_STOP(vol->v_tr);
2022 return (EBUSY);
2023 } else
2024 vol->v_state = G_RAID_VOLUME_S_STOPPED;
2025 }
2026 if (g_raid_event_check(sc, vol) != 0)
2027 return (EBUSY);
2028 if (vol->v_provider != NULL)
2029 return (EBUSY);
2030 if (vol->v_provider_open != 0)
2031 return (EBUSY);
2032 if (vol->v_tr) {
2033 G_RAID_TR_FREE(vol->v_tr);
2034 kobj_delete((kobj_t)vol->v_tr, M_RAID);
2035 vol->v_tr = NULL;
2036 }
2037 if (vol->v_rootmount)
2038 root_mount_rel(vol->v_rootmount);
2039 g_topology_lock();
2040 LIST_REMOVE(vol, v_global_next);
2041 g_topology_unlock();
2042 TAILQ_REMOVE(&sc->sc_volumes, vol, v_next);
2043 for (i = 0; i < G_RAID_MAX_SUBDISKS; i++) {
2044 g_raid_event_cancel(sc, &vol->v_subdisks[i]);
2045 disk = vol->v_subdisks[i].sd_disk;
2046 if (disk == NULL)
2047 continue;
2048 TAILQ_REMOVE(&disk->d_subdisks, &vol->v_subdisks[i], sd_next);
2049 }
2050 G_RAID_DEBUG1(2, sc, "Volume %s destroyed.", vol->v_name);
2051 if (sc->sc_md)
2052 G_RAID_MD_FREE_VOLUME(sc->sc_md, vol);
2053 g_raid_event_cancel(sc, vol);
2054 free(vol, M_RAID);
2055 if (sc->sc_stopping == G_RAID_DESTROY_HARD) {
2056 /* Wake up worker to let it self-destruct. */
2057 g_raid_event_send(sc, G_RAID_NODE_E_WAKE, 0);
2058 }
2059 return (0);
2060 }
2061
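/*
 * Destroy a disk: kill its consumer, disconnect and detach all of its
 * subdisks, let the metadata module release its per-disk state, and
 * free the structure.
 */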
2062 int
2063 g_raid_destroy_disk(struct g_raid_disk *disk)
2064 {
2065 struct g_raid_softc *sc;
2066 struct g_raid_subdisk *sd, *tmp;
2067
2068 sc = disk->d_softc;
2069 G_RAID_DEBUG1(2, sc, "Destroying disk.");
2070 if (disk->d_consumer) {
2071 g_raid_kill_consumer(sc, disk->d_consumer);
2072 disk->d_consumer = NULL;
2073 }
2074 TAILQ_FOREACH_SAFE(sd, &disk->d_subdisks, sd_next, tmp) {
2075 g_raid_change_subdisk_state(sd, G_RAID_SUBDISK_S_NONE);
2076 g_raid_event_send(sd, G_RAID_SUBDISK_E_DISCONNECTED,
2077 G_RAID_EVENT_SUBDISK);
2078 TAILQ_REMOVE(&disk->d_subdisks, sd, sd_next);
2079 sd->sd_disk = NULL;
2080 }
2081 TAILQ_REMOVE(&sc->sc_disks, disk, d_next);
2082 if (sc->sc_md)
2083 G_RAID_MD_FREE_DISK(sc->sc_md, disk);
2084 g_raid_event_cancel(sc, disk);
2085 free(disk, M_RAID);
2086 return (0);
2087 }
2088
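/*
 * Request destruction of a node.  With volumes still open, SOFT fails
 * with EBUSY, DELAYED postpones destruction until last close, and HARD
 * proceeds regardless.  The caller must hold sc_lock, which is dropped
 * here (PDROP) while sleeping until the worker finishes the teardown.
 */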
2089 int
2090 g_raid_destroy(struct g_raid_softc *sc, int how)
2091 {
2092 int opens;
2093
2094 g_topology_assert_not();
2095 if (sc == NULL)
2096 return (ENXIO);
2097 sx_assert(&sc->sc_lock, SX_XLOCKED);
2098
2099 /* Count open volumes. */
2100 opens = g_raid_nopens(sc);
2101
2102 /* React if some volumes are still open. */
2103 if (opens > 0) {
2104 switch (how) {
2105 case G_RAID_DESTROY_SOFT:
2106 G_RAID_DEBUG1(1, sc,
2107 "%d volumes are still open.",
2108 opens);
2109 return (EBUSY);
2110 case G_RAID_DESTROY_DELAYED:
2111 G_RAID_DEBUG1(1, sc,
2112 "Array will be destroyed on last close.");
2113 sc->sc_stopping = G_RAID_DESTROY_DELAYED;
2114 return (EBUSY);
2115 case G_RAID_DESTROY_HARD:
2116 G_RAID_DEBUG1(1, sc,
2117 "%d volumes are still open.",
2118 opens);
2119 }
2120 }
2121
2122 /* Mark node for destruction. */
2123 sc->sc_stopping = G_RAID_DESTROY_HARD;
2124 /* Wake up worker to let it self-destruct. */
2125 g_raid_event_send(sc, G_RAID_NODE_E_WAKE, 0);
2126 /* Sleep until node destroyed. */
2127 sx_sleep(&sc->sc_stopping, &sc->sc_lock,
2128 PRIBIO | PDROP, "r:destroy", 0);
2129 return (0);
2130 }
2131
2132 static void
2133 g_raid_taste_orphan(struct g_consumer *cp)
2134 {
2135
2136 KASSERT(1 == 0, ("%s called while tasting %s.", __func__,
2137 cp->provider->name));
2138 }
2139
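/*
 * Taste a provider: attach a temporary consumer to it and offer it to
 * every enabled metadata class in priority order.  The first class
 * whose taste does not fail ends the search; on G_RAID_MD_TASTE_NEW
 * the metadata object is kept and the geom it created is returned.
 */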
2140 static struct g_geom *
2141 g_raid_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
2142 {
2143 struct g_consumer *cp;
2144 struct g_geom *gp, *geom;
2145 struct g_raid_md_class *class;
2146 struct g_raid_md_object *obj;
2147 int status;
2148
2149 g_topology_assert();
2150 g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
2151 if (!g_raid_enable)
2152 return (NULL);
2153 G_RAID_DEBUG(2, "Tasting provider %s.", pp->name);
2154
2155 gp = g_new_geomf(mp, "raid:taste");
2156 /*
2157 * This orphan function should never be called.
2158 */
2159 gp->orphan = g_raid_taste_orphan;
2160 cp = g_new_consumer(gp);
2161 g_attach(cp, pp);
2162
2163 geom = NULL;
2164 LIST_FOREACH(class, &g_raid_md_classes, mdc_list) {
2165 if (!class->mdc_enable)
2166 continue;
2167 G_RAID_DEBUG(2, "Tasting provider %s for %s metadata.",
2168 pp->name, class->name);
2169 obj = (void *)kobj_create((kobj_class_t)class, M_RAID,
2170 M_WAITOK);
2171 obj->mdo_class = class;
2172 status = G_RAID_MD_TASTE(obj, mp, cp, &geom);
2173 if (status != G_RAID_MD_TASTE_NEW)
2174 kobj_delete((kobj_t)obj, M_RAID);
2175 if (status != G_RAID_MD_TASTE_FAIL)
2176 break;
2177 }
2178
2179 g_detach(cp);
2180 g_destroy_consumer(cp);
2181 g_destroy_geom(gp);
2182 G_RAID_DEBUG(2, "Tasting provider %s done.", pp->name);
2183 return (geom);
2184 }
2185
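/*
 * Create a new array for an explicitly requested metadata format:
 * find the named metadata class (case-insensitive) and pass it the
 * gctl request.  Returns the class's taste status, or
 * G_RAID_MD_TASTE_FAIL if the format is unknown.
 */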
2186 int
2187 g_raid_create_node_format(const char *format, struct gctl_req *req,
2188 struct g_geom **gp)
2189 {
2190 struct g_raid_md_class *class;
2191 struct g_raid_md_object *obj;
2192 int status;
2193
2194 G_RAID_DEBUG(2, "Creating array for %s metadata.", format);
2195 LIST_FOREACH(class, &g_raid_md_classes, mdc_list) {
2196 if (strcasecmp(class->name, format) == 0)
2197 break;
2198 }
2199 if (class == NULL) {
2200 G_RAID_DEBUG(1, "No support for %s metadata.", format);
2201 return (G_RAID_MD_TASTE_FAIL);
2202 }
2203 obj = (void *)kobj_create((kobj_class_t)class, M_RAID,
2204 M_WAITOK);
2205 obj->mdo_class = class;
2206 status = G_RAID_MD_CREATE_REQ(obj, &g_raid_class, req, gp);
2207 if (status != G_RAID_MD_TASTE_NEW)
2208 kobj_delete((kobj_t)obj, M_RAID);
2209 return (status);
2210 }
2211
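/*
 * GEOM class destroy method: soft-destroy the node.  On success
 * g_raid_destroy() drops sc_lock itself; on failure it is dropped
 * here before the topology lock is retaken.
 */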
2212 static int
2213 g_raid_destroy_geom(struct gctl_req *req __unused,
2214 struct g_class *mp __unused, struct g_geom *gp)
2215 {
2216 struct g_raid_softc *sc;
2217 int error;
2218
2219 g_topology_unlock();
2220 sc = gp->softc;
2221 sx_xlock(&sc->sc_lock);
2222 g_cancel_event(sc);
2223 error = g_raid_destroy(gp->softc, G_RAID_DESTROY_SOFT);
2224 if (error != 0)
2225 sx_xunlock(&sc->sc_lock);
2226 g_topology_lock();
2227 return (error);
2228 }
2229
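/*
 * Ask the metadata module to update on-disk metadata for the given
 * volume, subdisk and/or disk, unless the node is already being
 * hard-destroyed.
 */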
2230 void
g_raid_write_metadata(struct g_raid_softc *sc, struct g_raid_volume *vol,
2231 struct g_raid_subdisk *sd, struct g_raid_disk *disk)
2232 {
2233
2234 if (sc->sc_stopping == G_RAID_DESTROY_HARD)
2235 return;
2236 if (sc->sc_md)
2237 G_RAID_MD_WRITE(sc->sc_md, vol, sd, disk);
2238 }
2239
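/*
 * Report a disk failure to the metadata module.  The disk may be
 * given directly or via one of its subdisks; requests for absent
 * disks or disks not in the ACTIVE state are ignored with a warning.
 */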
2240 void
g_raid_fail_disk(struct g_raid_softc *sc,
2241 struct g_raid_subdisk *sd, struct g_raid_disk *disk)
2242 {
2243
2244 if (disk == NULL)
2245 disk = sd->sd_disk;
2246 if (disk == NULL) {
2247 G_RAID_DEBUG1(0, sc, "Warning! Fail request to an absent disk!");
2248 return;
2249 }
2250 if (disk->d_state != G_RAID_DISK_S_ACTIVE) {
2251 G_RAID_DEBUG1(0, sc, "Warning! Fail request to a disk in the "
2252 "wrong state (%s)!", g_raid_disk_state2str(disk->d_state));
2253 return;
2254 }
2255 if (sc->sc_md)
2256 G_RAID_MD_FAIL_DISK(sc->sc_md, sd, disk);
2257 }
2258
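/*
 * Export node state as XML for the GEOM configuration dump: per
 * provider (volume), per consumer (disk), or for the geom as a whole.
 * Called with the topology lock held; it is traded for sc_lock while
 * the sbuf is being formatted.
 */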
2259 static void
2260 g_raid_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
2261 struct g_consumer *cp, struct g_provider *pp)
2262 {
2263 struct g_raid_softc *sc;
2264 struct g_raid_volume *vol;
2265 struct g_raid_subdisk *sd;
2266 struct g_raid_disk *disk;
2267 int i, s;
2268
2269 g_topology_assert();
2270
2271 sc = gp->softc;
2272 if (sc == NULL)
2273 return;
2274 if (pp != NULL) {
2275 vol = pp->private;
2276 g_topology_unlock();
2277 sx_xlock(&sc->sc_lock);
2278 sbuf_printf(sb, "%s<Label>%s</Label>\n", indent,
2279 vol->v_name);
2280 sbuf_printf(sb, "%s<RAIDLevel>%s</RAIDLevel>\n", indent,
2281 g_raid_volume_level2str(vol->v_raid_level,
2282 vol->v_raid_level_qualifier));
2283 sbuf_printf(sb,
2284 "%s<Transformation>%s</Transformation>\n", indent,
2285 vol->v_tr ? vol->v_tr->tro_class->name : "NONE");
2286 sbuf_printf(sb, "%s<Components>%u</Components>\n", indent,
2287 vol->v_disks_count);
2288 sbuf_printf(sb, "%s<Strip>%u</Strip>\n", indent,
2289 vol->v_strip_size);
2290 sbuf_printf(sb, "%s<State>%s</State>\n", indent,
2291 g_raid_volume_state2str(vol->v_state));
2292 sbuf_printf(sb, "%s<Dirty>%s</Dirty>\n", indent,
2293 vol->v_dirty ? "Yes" : "No");
2294 sbuf_printf(sb, "%s<Subdisks>", indent);
2295 for (i = 0; i < vol->v_disks_count; i++) {
2296 sd = &vol->v_subdisks[i];
2297 if (sd->sd_disk != NULL &&
2298 sd->sd_disk->d_consumer != NULL) {
2299 sbuf_printf(sb, "%s ",
2300 g_raid_get_diskname(sd->sd_disk));
2301 } else {
2302 sbuf_printf(sb, "NONE ");
2303 }
2304 sbuf_printf(sb, "(%s",
2305 g_raid_subdisk_state2str(sd->sd_state));
2306 if (sd->sd_state == G_RAID_SUBDISK_S_REBUILD ||
2307 sd->sd_state == G_RAID_SUBDISK_S_RESYNC) {
2308 sbuf_printf(sb, " %d%%",
2309 (int)(sd->sd_rebuild_pos * 100 /
2310 sd->sd_size));
2311 }
2312 sbuf_printf(sb, ")");
2313 if (i + 1 < vol->v_disks_count)
2314 sbuf_printf(sb, ", ");
2315 }
2316 sbuf_printf(sb, "</Subdisks>\n");
2317 sx_xunlock(&sc->sc_lock);
2318 g_topology_lock();
2319 } else if (cp != NULL) {
2320 disk = cp->private;
2321 if (disk == NULL)
2322 return;
2323 g_topology_unlock();
2324 sx_xlock(&sc->sc_lock);
2325 sbuf_printf(sb, "%s<State>%s", indent,
2326 g_raid_disk_state2str(disk->d_state));
2327 if (!TAILQ_EMPTY(&disk->d_subdisks)) {
2328 sbuf_printf(sb, " (");
2329 TAILQ_FOREACH(sd, &disk->d_subdisks, sd_next) {
2330 sbuf_printf(sb, "%s",
2331 g_raid_subdisk_state2str(sd->sd_state));
2332 if (sd->sd_state == G_RAID_SUBDISK_S_REBUILD ||
2333 sd->sd_state == G_RAID_SUBDISK_S_RESYNC) {
2334 sbuf_printf(sb, " %d%%",
2335 (int)(sd->sd_rebuild_pos * 100 /
2336 sd->sd_size));
2337 }
2338 if (TAILQ_NEXT(sd, sd_next))
2339 sbuf_printf(sb, ", ");
2340 }
2341 sbuf_printf(sb, ")");
2342 }
2343 sbuf_printf(sb, "</State>\n");
2344 sbuf_printf(sb, "%s<Subdisks>", indent);
2345 TAILQ_FOREACH(sd, &disk->d_subdisks, sd_next) {
2346 sbuf_printf(sb, "r%d(%s):%d@%ju",
2347 sd->sd_volume->v_global_id,
2348 sd->sd_volume->v_name,
2349 sd->sd_pos, (uintmax_t)sd->sd_offset);
2350 if (TAILQ_NEXT(sd, sd_next))
2351 sbuf_printf(sb, ", ");
2352 }
2353 sbuf_printf(sb, "</Subdisks>\n");
2354 sbuf_printf(sb, "%s<ReadErrors>%d</ReadErrors>\n", indent,
2355 disk->d_read_errs);
2356 sx_xunlock(&sc->sc_lock);
2357 g_topology_lock();
2358 } else {
2359 g_topology_unlock();
2360 sx_xlock(&sc->sc_lock);
2361 if (sc->sc_md) {
2362 sbuf_printf(sb, "%s<Metadata>%s</Metadata>\n", indent,
2363 sc->sc_md->mdo_class->name);
2364 }
2365 if (!TAILQ_EMPTY(&sc->sc_volumes)) {
2366 s = 0xff;
2367 TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) {
2368 if (vol->v_state < s)
2369 s = vol->v_state;
2370 }
2371 sbuf_printf(sb, "%s<State>%s</State>\n", indent,
2372 g_raid_volume_state2str(s));
2373 }
2374 sx_xunlock(&sc->sc_lock);
2375 g_topology_lock();
2376 }
2377 }
2378
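/*
 * shutdown_pre_sync handler: request delayed destruction of every
 * array, presumably so that each one shuts down cleanly on its last
 * close before the system syncs and halts.
 */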
2379 static void
2380 g_raid_shutdown_pre_sync(void *arg, int howto)
2381 {
2382 struct g_class *mp;
2383 struct g_geom *gp, *gp2;
2384 struct g_raid_softc *sc;
2385 int error;
2386
2387 mp = arg;
2388 DROP_GIANT();
2389 g_topology_lock();
2390 LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
2391 if ((sc = gp->softc) == NULL)
2392 continue;
2393 g_topology_unlock();
2394 sx_xlock(&sc->sc_lock);
2395 g_cancel_event(sc);
2396 error = g_raid_destroy(sc, G_RAID_DESTROY_DELAYED);
2397 if (error != 0)
2398 sx_xunlock(&sc->sc_lock);
2399 g_topology_lock();
2400 }
2401 g_topology_unlock();
2402 PICKUP_GIANT();
2403 }
2404
2405 static void
2406 g_raid_init(struct g_class *mp)
2407 {
2408
2409 g_raid_pre_sync = EVENTHANDLER_REGISTER(shutdown_pre_sync,
2410 g_raid_shutdown_pre_sync, mp, SHUTDOWN_PRI_FIRST);
2411 if (g_raid_pre_sync == NULL)
2412 G_RAID_DEBUG(0, "Warning! Cannot register shutdown event.");
2413 g_raid_started = 1;
2414 }
2415
2416 static void
2417 g_raid_fini(struct g_class *mp)
2418 {
2419
2420 if (g_raid_pre_sync != NULL)
2421 EVENTHANDLER_DEREGISTER(shutdown_pre_sync, g_raid_pre_sync);
2422 g_raid_started = 0;
2423 }
2424
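/*
 * Module event handler for metadata submodules: on load, insert the
 * class into the global list ordered by ascending priority and, if
 * the main module is already started, retaste existing providers;
 * on unload, remove it from the list.
 */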
2425 int
2426 g_raid_md_modevent(module_t mod, int type, void *arg)
2427 {
2428 struct g_raid_md_class *class, *c, *nc;
2429 int error;
2430
2431 error = 0;
2432 class = arg;
2433 switch (type) {
2434 case MOD_LOAD:
2435 c = LIST_FIRST(&g_raid_md_classes);
2436 if (c == NULL || c->mdc_priority > class->mdc_priority)
2437 LIST_INSERT_HEAD(&g_raid_md_classes, class, mdc_list);
2438 else {
2439 while ((nc = LIST_NEXT(c, mdc_list)) != NULL &&
2440 nc->mdc_priority < class->mdc_priority)
2441 c = nc;
2442 LIST_INSERT_AFTER(c, class, mdc_list);
2443 }
2444 if (g_raid_started)
2445 g_retaste(&g_raid_class);
2446 break;
2447 case MOD_UNLOAD:
2448 LIST_REMOVE(class, mdc_list);
2449 break;
2450 default:
2451 error = EOPNOTSUPP;
2452 break;
2453 }
2454
2455 return (error);
2456 }
2457
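/*
 * Module event handler for transformation submodules: the same
 * priority-ordered insertion on load and removal on unload as for
 * metadata classes, but without the retaste step.
 */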
2458 int
2459 g_raid_tr_modevent(module_t mod, int type, void *arg)
2460 {
2461 struct g_raid_tr_class *class, *c, *nc;
2462 int error;
2463
2464 error = 0;
2465 class = arg;
2466 switch (type) {
2467 case MOD_LOAD:
2468 c = LIST_FIRST(&g_raid_tr_classes);
2469 if (c == NULL || c->trc_priority > class->trc_priority)
2470 LIST_INSERT_HEAD(&g_raid_tr_classes, class, trc_list);
2471 else {
2472 while ((nc = LIST_NEXT(c, trc_list)) != NULL &&
2473 nc->trc_priority < class->trc_priority)
2474 c = nc;
2475 LIST_INSERT_AFTER(c, class, trc_list);
2476 }
2477 break;
2478 case MOD_UNLOAD:
2479 LIST_REMOVE(class, trc_list);
2480 break;
2481 default:
2482 error = EOPNOTSUPP;
2483 break;
2484 }
2485
2486 return (error);
2487 }
2488
2489 /*
2490 * Use local implementation of DECLARE_GEOM_CLASS(g_raid_class, g_raid)
2491 * to reduce module priority, allowing submodules to register themselves first.
2492 */
2493 static moduledata_t g_raid_mod = {
2494 "g_raid",
2495 g_modevent,
2496 &g_raid_class
2497 };
2498 DECLARE_MODULE(g_raid, g_raid_mod, SI_SUB_DRIVERS, SI_ORDER_THIRD);
2499 MODULE_VERSION(geom_raid, 0);