1 /*-
2 * Copyright (c) 2005-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD: releng/10.0/sys/geom/journal/g_journal.c 253141 2013-07-10 10:11:43Z kib $");
29
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/kernel.h>
33 #include <sys/module.h>
34 #include <sys/limits.h>
35 #include <sys/lock.h>
36 #include <sys/mutex.h>
37 #include <sys/bio.h>
38 #include <sys/sysctl.h>
39 #include <sys/malloc.h>
40 #include <sys/mount.h>
41 #include <sys/eventhandler.h>
42 #include <sys/proc.h>
43 #include <sys/kthread.h>
44 #include <sys/sched.h>
45 #include <sys/taskqueue.h>
46 #include <sys/vnode.h>
47 #include <sys/sbuf.h>
48 #ifdef GJ_MEMDEBUG
49 #include <sys/stack.h>
50 #include <sys/kdb.h>
51 #endif
52 #include <vm/vm.h>
53 #include <vm/vm_kern.h>
54 #include <geom/geom.h>
55
56 #include <geom/journal/g_journal.h>
57
58 FEATURE(geom_journal, "GEOM journaling support");
59
60 /*
61 * On-disk journal format:
62 *
63 * JH - Journal header
64 * RH - Record header
65 *
66 * %%%%%% ****** +------+ +------+ ****** +------+ %%%%%%
67 * % JH % * RH * | Data | | Data | ... * RH * | Data | ... % JH % ...
68 * %%%%%% ****** +------+ +------+ ****** +------+ %%%%%%
69 *
70 */
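/*
 * Journal headers are chained: each JH stores the ID of the current journal
 * and the ID of the journal that will follow it (jh_journal_next_id).  This
 * chaining is how g_journal_sync() recognizes where one journal terminates
 * and the next one begins.
 */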
71
72 CTASSERT(sizeof(struct g_journal_header) <= 512);
73 CTASSERT(sizeof(struct g_journal_record_header) <= 512);
74
75 static MALLOC_DEFINE(M_JOURNAL, "journal_data", "GEOM_JOURNAL Data");
76 static struct mtx g_journal_cache_mtx;
77 MTX_SYSINIT(g_journal_cache, &g_journal_cache_mtx, "cache usage", MTX_DEF);
78
79 const struct g_journal_desc *g_journal_filesystems[] = {
80 &g_journal_ufs,
81 NULL
82 };
83
84 SYSCTL_DECL(_kern_geom);
85
86 int g_journal_debug = 0;
87 TUNABLE_INT("kern.geom.journal.debug", &g_journal_debug);
88 static u_int g_journal_switch_time = 10;
89 static u_int g_journal_force_switch = 70;
90 static u_int g_journal_parallel_flushes = 16;
91 static u_int g_journal_parallel_copies = 16;
92 static u_int g_journal_accept_immediately = 64;
93 static u_int g_journal_record_entries = GJ_RECORD_HEADER_NENTRIES;
94 static u_int g_journal_do_optimize = 1;
95
96 static SYSCTL_NODE(_kern_geom, OID_AUTO, journal, CTLFLAG_RW, 0,
97 "GEOM_JOURNAL stuff");
98 SYSCTL_INT(_kern_geom_journal, OID_AUTO, debug, CTLFLAG_RW, &g_journal_debug, 0,
99 "Debug level");
100 SYSCTL_UINT(_kern_geom_journal, OID_AUTO, switch_time, CTLFLAG_RW,
101 &g_journal_switch_time, 0, "Switch journals every N seconds");
102 SYSCTL_UINT(_kern_geom_journal, OID_AUTO, force_switch, CTLFLAG_RW,
103 &g_journal_force_switch, 0, "Force switch when journal is N% full");
104 SYSCTL_UINT(_kern_geom_journal, OID_AUTO, parallel_flushes, CTLFLAG_RW,
105 &g_journal_parallel_flushes, 0,
106 "Number of flush I/O requests to send in parallel");
107 SYSCTL_UINT(_kern_geom_journal, OID_AUTO, accept_immediately, CTLFLAG_RW,
108 &g_journal_accept_immediately, 0,
109 "Number of I/O requests accepted immediately");
110 SYSCTL_UINT(_kern_geom_journal, OID_AUTO, parallel_copies, CTLFLAG_RW,
111 &g_journal_parallel_copies, 0,
112 "Number of copy I/O requests to send in parallel");
113 static int
114 g_journal_record_entries_sysctl(SYSCTL_HANDLER_ARGS)
115 {
116 u_int entries;
117 int error;
118
119 entries = g_journal_record_entries;
120 error = sysctl_handle_int(oidp, &entries, 0, req);
121 if (error != 0 || req->newptr == NULL)
122 return (error);
123 if (entries < 1 || entries > GJ_RECORD_HEADER_NENTRIES)
124 return (EINVAL);
125 g_journal_record_entries = entries;
126 return (0);
127 }
128 SYSCTL_PROC(_kern_geom_journal, OID_AUTO, record_entries,
129 CTLTYPE_UINT | CTLFLAG_RW, NULL, 0, g_journal_record_entries_sysctl, "I",
130 "Maximum number of entires in one journal record");
131 SYSCTL_UINT(_kern_geom_journal, OID_AUTO, optimize, CTLFLAG_RW,
132 &g_journal_do_optimize, 0, "Try to combine bios on flush and copy");
133
134 static u_int g_journal_cache_used = 0;
135 static u_int g_journal_cache_limit = 64 * 1024 * 1024;
136 TUNABLE_INT("kern.geom.journal.cache.limit", &g_journal_cache_limit);
137 static u_int g_journal_cache_divisor = 2;
138 TUNABLE_INT("kern.geom.journal.cache.divisor", &g_journal_cache_divisor);
139 static u_int g_journal_cache_switch = 90;
140 static u_int g_journal_cache_misses = 0;
141 static u_int g_journal_cache_alloc_failures = 0;
142 static u_int g_journal_cache_low = 0;
143
144 static SYSCTL_NODE(_kern_geom_journal, OID_AUTO, cache, CTLFLAG_RW, 0,
145 "GEOM_JOURNAL cache");
146 SYSCTL_UINT(_kern_geom_journal_cache, OID_AUTO, used, CTLFLAG_RD,
147 &g_journal_cache_used, 0, "Number of allocated bytes");
148 static int
149 g_journal_cache_limit_sysctl(SYSCTL_HANDLER_ARGS)
150 {
151 u_int limit;
152 int error;
153
154 limit = g_journal_cache_limit;
155 error = sysctl_handle_int(oidp, &limit, 0, req);
156 if (error != 0 || req->newptr == NULL)
157 return (error);
158 g_journal_cache_limit = limit;
159 g_journal_cache_low = (limit / 100) * g_journal_cache_switch;
160 return (0);
161 }
162 SYSCTL_PROC(_kern_geom_journal_cache, OID_AUTO, limit,
163 CTLTYPE_UINT | CTLFLAG_RW, NULL, 0, g_journal_cache_limit_sysctl, "I",
164 "Maximum number of allocated bytes");
165 SYSCTL_UINT(_kern_geom_journal_cache, OID_AUTO, divisor, CTLFLAG_RDTUN,
166 &g_journal_cache_divisor, 0,
167 "(kmem_size / kern.geom.journal.cache.divisor) == cache size");
168 static int
169 g_journal_cache_switch_sysctl(SYSCTL_HANDLER_ARGS)
170 {
171 u_int cswitch;
172 int error;
173
174 cswitch = g_journal_cache_switch;
175 error = sysctl_handle_int(oidp, &cswitch, 0, req);
176 if (error != 0 || req->newptr == NULL)
177 return (error);
178 if (cswitch > 100)
179 return (EINVAL);
180 g_journal_cache_switch = cswitch;
181 g_journal_cache_low = (g_journal_cache_limit / 100) * cswitch;
182 return (0);
183 }
184 SYSCTL_PROC(_kern_geom_journal_cache, OID_AUTO, switch,
185 CTLTYPE_UINT | CTLFLAG_RW, NULL, 0, g_journal_cache_switch_sysctl, "I",
186 "Force switch when we hit this percent of cache use");
187 SYSCTL_UINT(_kern_geom_journal_cache, OID_AUTO, misses, CTLFLAG_RW,
188 &g_journal_cache_misses, 0, "Number of cache misses");
189 SYSCTL_UINT(_kern_geom_journal_cache, OID_AUTO, alloc_failures, CTLFLAG_RW,
190 &g_journal_cache_alloc_failures, 0, "Memory allocation failures");
191
192 static u_long g_journal_stats_bytes_skipped = 0;
193 static u_long g_journal_stats_combined_ios = 0;
194 static u_long g_journal_stats_switches = 0;
195 static u_long g_journal_stats_wait_for_copy = 0;
196 static u_long g_journal_stats_journal_full = 0;
197 static u_long g_journal_stats_low_mem = 0;
198
199 static SYSCTL_NODE(_kern_geom_journal, OID_AUTO, stats, CTLFLAG_RW, 0,
200 "GEOM_JOURNAL statistics");
201 SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, skipped_bytes, CTLFLAG_RW,
202 &g_journal_stats_bytes_skipped, 0, "Number of skipped bytes");
203 SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, combined_ios, CTLFLAG_RW,
204 &g_journal_stats_combined_ios, 0, "Number of combined I/O requests");
205 SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, switches, CTLFLAG_RW,
206 &g_journal_stats_switches, 0, "Number of journal switches");
207 SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, wait_for_copy, CTLFLAG_RW,
208 &g_journal_stats_wait_for_copy, 0, "Wait for journal copy on switch");
209 SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, journal_full, CTLFLAG_RW,
210 &g_journal_stats_journal_full, 0,
211 "Number of times journal was almost full.");
212 SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, low_mem, CTLFLAG_RW,
213 &g_journal_stats_low_mem, 0, "Number of times low_mem hook was called.");
214
215 static g_taste_t g_journal_taste;
216 static g_ctl_req_t g_journal_config;
217 static g_dumpconf_t g_journal_dumpconf;
218 static g_init_t g_journal_init;
219 static g_fini_t g_journal_fini;
220
221 struct g_class g_journal_class = {
222 .name = G_JOURNAL_CLASS_NAME,
223 .version = G_VERSION,
224 .taste = g_journal_taste,
225 .ctlreq = g_journal_config,
226 .dumpconf = g_journal_dumpconf,
227 .init = g_journal_init,
228 .fini = g_journal_fini
229 };
230
231 static int g_journal_destroy(struct g_journal_softc *sc);
232 static void g_journal_metadata_update(struct g_journal_softc *sc);
233 static void g_journal_switch_wait(struct g_journal_softc *sc);
234
235 #define GJ_SWITCHER_WORKING 0
236 #define GJ_SWITCHER_DIE 1
237 #define GJ_SWITCHER_DIED 2
238 static int g_journal_switcher_state = GJ_SWITCHER_WORKING;
239 static int g_journal_switcher_wokenup = 0;
240 static int g_journal_sync_requested = 0;
241
242 #ifdef GJ_MEMDEBUG
243 struct meminfo {
244 size_t mi_size;
245 struct stack mi_stack;
246 };
247 #endif
248
249 /*
250 * We use our own malloc/realloc/free functions, so we can collect statistics
251 * and force a journal switch when we're running out of cache.
252 */
253 static void *
254 gj_malloc(size_t size, int flags)
255 {
256 void *p;
257 #ifdef GJ_MEMDEBUG
258 struct meminfo *mi;
259 #endif
260
261 mtx_lock(&g_journal_cache_mtx);
262 if (g_journal_cache_limit > 0 && !g_journal_switcher_wokenup &&
263 g_journal_cache_used + size > g_journal_cache_low) {
264 GJ_DEBUG(1, "No cache, waking up the switcher.");
265 g_journal_switcher_wokenup = 1;
266 wakeup(&g_journal_switcher_state);
267 }
268 if ((flags & M_NOWAIT) && g_journal_cache_limit > 0 &&
269 g_journal_cache_used + size > g_journal_cache_limit) {
270 mtx_unlock(&g_journal_cache_mtx);
271 g_journal_cache_alloc_failures++;
272 return (NULL);
273 }
274 g_journal_cache_used += size;
275 mtx_unlock(&g_journal_cache_mtx);
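/*
 * Note: M_NOWAIT only governs the cache-limit check above; the actual
 * allocation below is always performed with M_WAITOK.
 */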
276 flags &= ~M_NOWAIT;
277 #ifndef GJ_MEMDEBUG
278 p = malloc(size, M_JOURNAL, flags | M_WAITOK);
279 #else
280 mi = malloc(sizeof(*mi) + size, M_JOURNAL, flags | M_WAITOK);
281 p = (u_char *)mi + sizeof(*mi);
282 mi->mi_size = size;
283 stack_save(&mi->mi_stack);
284 #endif
285 return (p);
286 }
287
288 static void
289 gj_free(void *p, size_t size)
290 {
291 #ifdef GJ_MEMDEBUG
292 struct meminfo *mi;
293 #endif
294
295 KASSERT(p != NULL, ("p=NULL"));
296 KASSERT(size > 0, ("size=0"));
297 mtx_lock(&g_journal_cache_mtx);
298 KASSERT(g_journal_cache_used >= size, ("Freeing too much?"));
299 g_journal_cache_used -= size;
300 mtx_unlock(&g_journal_cache_mtx);
301 #ifdef GJ_MEMDEBUG
302 mi = p = (void *)((u_char *)p - sizeof(*mi));
303 if (mi->mi_size != size) {
304 printf("GJOURNAL: Size mismatch! %zu != %zu\n", size,
305 mi->mi_size);
306 printf("GJOURNAL: Alloc backtrace:\n");
307 stack_print(&mi->mi_stack);
308 printf("GJOURNAL: Free backtrace:\n");
309 kdb_backtrace();
310 }
311 #endif
312 free(p, M_JOURNAL);
313 }
314
315 static void *
316 gj_realloc(void *p, size_t size, size_t oldsize)
317 {
318 void *np;
319
320 #ifndef GJ_MEMDEBUG
321 mtx_lock(&g_journal_cache_mtx);
322 g_journal_cache_used -= oldsize;
323 g_journal_cache_used += size;
324 mtx_unlock(&g_journal_cache_mtx);
325 np = realloc(p, size, M_JOURNAL, M_WAITOK);
326 #else
327 np = gj_malloc(size, M_WAITOK);
328 bcopy(p, np, MIN(oldsize, size));
329 gj_free(p, oldsize);
330 #endif
331 return (np);
332 }
333
334 static void
335 g_journal_check_overflow(struct g_journal_softc *sc)
336 {
337 off_t length, used;
338
339 if ((sc->sc_active.jj_offset < sc->sc_inactive.jj_offset &&
340 sc->sc_journal_offset >= sc->sc_inactive.jj_offset) ||
341 (sc->sc_active.jj_offset > sc->sc_inactive.jj_offset &&
342 sc->sc_journal_offset >= sc->sc_inactive.jj_offset &&
343 sc->sc_journal_offset < sc->sc_active.jj_offset)) {
344 panic("Journal overflow "
345 "(id = %u joffset=%jd active=%jd inactive=%jd)",
346 (unsigned)sc->sc_id,
347 (intmax_t)sc->sc_journal_offset,
348 (intmax_t)sc->sc_active.jj_offset,
349 (intmax_t)sc->sc_inactive.jj_offset);
350 }
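/*
 * Compute how much journal space is available to the active journal
 * (length) and how much of it is already used, taking wrap-around past
 * sc_jend into account.
 */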
351 if (sc->sc_active.jj_offset < sc->sc_inactive.jj_offset) {
352 length = sc->sc_inactive.jj_offset - sc->sc_active.jj_offset;
353 used = sc->sc_journal_offset - sc->sc_active.jj_offset;
354 } else {
355 length = sc->sc_jend - sc->sc_active.jj_offset;
356 length += sc->sc_inactive.jj_offset - sc->sc_jstart;
357 if (sc->sc_journal_offset >= sc->sc_active.jj_offset)
358 used = sc->sc_journal_offset - sc->sc_active.jj_offset;
359 else {
360 used = sc->sc_jend - sc->sc_active.jj_offset;
361 used += sc->sc_journal_offset - sc->sc_jstart;
362 }
363 }
364 /* Already woken up? */
365 if (g_journal_switcher_wokenup)
366 return;
367 /*
368 * If the active journal takes more than g_journal_force_switch percent
369 * of free journal space, we force a journal switch.
370 */
371 KASSERT(length > 0,
372 ("length=%jd used=%jd active=%jd inactive=%jd joffset=%jd",
373 (intmax_t)length, (intmax_t)used,
374 (intmax_t)sc->sc_active.jj_offset,
375 (intmax_t)sc->sc_inactive.jj_offset,
376 (intmax_t)sc->sc_journal_offset));
377 if ((used * 100) / length > g_journal_force_switch) {
378 g_journal_stats_journal_full++;
379 GJ_DEBUG(1, "Journal %s %jd%% full, forcing journal switch.",
380 sc->sc_name, (used * 100) / length);
381 mtx_lock(&g_journal_cache_mtx);
382 g_journal_switcher_wokenup = 1;
383 wakeup(&g_journal_switcher_state);
384 mtx_unlock(&g_journal_cache_mtx);
385 }
386 }
387
388 static void
389 g_journal_orphan(struct g_consumer *cp)
390 {
391 struct g_journal_softc *sc;
392 char name[256];
393 int error;
394
395 g_topology_assert();
396 sc = cp->geom->softc;
397 strlcpy(name, cp->provider->name, sizeof(name));
398 GJ_DEBUG(0, "Lost provider %s.", name);
399 if (sc == NULL)
400 return;
401 error = g_journal_destroy(sc);
402 if (error == 0)
403 GJ_DEBUG(0, "Journal %s destroyed.", name);
404 else {
405 GJ_DEBUG(0, "Cannot destroy journal %s (error=%d). "
406 "Destroy it manually after last close.", sc->sc_name,
407 error);
408 }
409 }
410
411 static int
412 g_journal_access(struct g_provider *pp, int acr, int acw, int ace)
413 {
414 struct g_journal_softc *sc;
415 int dcr, dcw, dce;
416
417 g_topology_assert();
418 GJ_DEBUG(2, "Access request for %s: r%dw%de%d.", pp->name,
419 acr, acw, ace);
420
421 dcr = pp->acr + acr;
422 dcw = pp->acw + acw;
423 dce = pp->ace + ace;
424
425 sc = pp->geom->softc;
426 if (sc == NULL || (sc->sc_flags & GJF_DEVICE_DESTROY)) {
427 if (acr <= 0 && acw <= 0 && ace <= 0)
428 return (0);
429 else
430 return (ENXIO);
431 }
432 if (pp->acw == 0 && dcw > 0) {
433 GJ_DEBUG(1, "Marking %s as dirty.", sc->sc_name);
434 sc->sc_flags &= ~GJF_DEVICE_CLEAN;
435 g_topology_unlock();
436 g_journal_metadata_update(sc);
437 g_topology_lock();
438 } /* else if (pp->acw == 0 && dcw > 0 && JEMPTY(sc)) {
439 GJ_DEBUG(1, "Marking %s as clean.", sc->sc_name);
440 sc->sc_flags |= GJF_DEVICE_CLEAN;
441 g_topology_unlock();
442 g_journal_metadata_update(sc);
443 g_topology_lock();
444 } */
445 return (0);
446 }
447
448 static void
449 g_journal_header_encode(struct g_journal_header *hdr, u_char *data)
450 {
451
452 bcopy(GJ_HEADER_MAGIC, data, sizeof(GJ_HEADER_MAGIC));
453 data += sizeof(GJ_HEADER_MAGIC);
454 le32enc(data, hdr->jh_journal_id);
455 data += 4;
456 le32enc(data, hdr->jh_journal_next_id);
457 }
458
459 static int
460 g_journal_header_decode(const u_char *data, struct g_journal_header *hdr)
461 {
462
463 bcopy(data, hdr->jh_magic, sizeof(hdr->jh_magic));
464 data += sizeof(hdr->jh_magic);
465 if (bcmp(hdr->jh_magic, GJ_HEADER_MAGIC, sizeof(GJ_HEADER_MAGIC)) != 0)
466 return (EINVAL);
467 hdr->jh_journal_id = le32dec(data);
468 data += 4;
469 hdr->jh_journal_next_id = le32dec(data);
470 return (0);
471 }
472
473 static void
474 g_journal_flush_cache(struct g_journal_softc *sc)
475 {
476 struct bintime bt;
477 int error;
478
479 if (sc->sc_bio_flush == 0)
480 return;
481 GJ_TIMER_START(1, &bt);
482 if (sc->sc_bio_flush & GJ_FLUSH_JOURNAL) {
483 error = g_io_flush(sc->sc_jconsumer);
484 GJ_DEBUG(error == 0 ? 2 : 0, "Flush cache of %s: error=%d.",
485 sc->sc_jconsumer->provider->name, error);
486 }
487 if (sc->sc_bio_flush & GJ_FLUSH_DATA) {
488 /*
489 * TODO: This could be called in parallel with the
490 * previous call.
491 */
492 error = g_io_flush(sc->sc_dconsumer);
493 GJ_DEBUG(error == 0 ? 2 : 0, "Flush cache of %s: error=%d.",
494 sc->sc_dconsumer->provider->name, error);
495 }
496 GJ_TIMER_STOP(1, &bt, "Cache flush time");
497 }
498
499 static int
500 g_journal_write_header(struct g_journal_softc *sc)
501 {
502 struct g_journal_header hdr;
503 struct g_consumer *cp;
504 u_char *buf;
505 int error;
506
507 cp = sc->sc_jconsumer;
508 buf = gj_malloc(cp->provider->sectorsize, M_WAITOK);
509
510 strlcpy(hdr.jh_magic, GJ_HEADER_MAGIC, sizeof(hdr.jh_magic));
511 hdr.jh_journal_id = sc->sc_journal_id;
512 hdr.jh_journal_next_id = sc->sc_journal_next_id;
513 g_journal_header_encode(&hdr, buf);
514 error = g_write_data(cp, sc->sc_journal_offset, buf,
515 cp->provider->sectorsize);
516 /* if (error == 0) */
517 sc->sc_journal_offset += cp->provider->sectorsize;
518
519 gj_free(buf, cp->provider->sectorsize);
520 return (error);
521 }
522
523 /*
524 * Every journal record has a header and data following it.
525 * The functions below are used to encode the header to little endian before
526 * storing it and to decode it back to the system's endianness after reading.
527 */
528 static void
529 g_journal_record_header_encode(struct g_journal_record_header *hdr,
530 u_char *data)
531 {
532 struct g_journal_entry *ent;
533 u_int i;
534
535 bcopy(GJ_RECORD_HEADER_MAGIC, data, sizeof(GJ_RECORD_HEADER_MAGIC));
536 data += sizeof(GJ_RECORD_HEADER_MAGIC);
537 le32enc(data, hdr->jrh_journal_id);
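/* Only 4 bytes are encoded here; advancing by 8 leaves 4 bytes of padding. */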
538 data += 8;
539 le16enc(data, hdr->jrh_nentries);
540 data += 2;
541 bcopy(hdr->jrh_sum, data, sizeof(hdr->jrh_sum));
542 data += 8;
543 for (i = 0; i < hdr->jrh_nentries; i++) {
544 ent = &hdr->jrh_entries[i];
545 le64enc(data, ent->je_joffset);
546 data += 8;
547 le64enc(data, ent->je_offset);
548 data += 8;
549 le64enc(data, ent->je_length);
550 data += 8;
551 }
552 }
553
554 static int
555 g_journal_record_header_decode(const u_char *data,
556 struct g_journal_record_header *hdr)
557 {
558 struct g_journal_entry *ent;
559 u_int i;
560
561 bcopy(data, hdr->jrh_magic, sizeof(hdr->jrh_magic));
562 data += sizeof(hdr->jrh_magic);
563 if (strcmp(hdr->jrh_magic, GJ_RECORD_HEADER_MAGIC) != 0)
564 return (EINVAL);
565 hdr->jrh_journal_id = le32dec(data);
566 data += 8;
567 hdr->jrh_nentries = le16dec(data);
568 data += 2;
569 if (hdr->jrh_nentries > GJ_RECORD_HEADER_NENTRIES)
570 return (EINVAL);
571 bcopy(data, hdr->jrh_sum, sizeof(hdr->jrh_sum));
572 data += 8;
573 for (i = 0; i < hdr->jrh_nentries; i++) {
574 ent = &hdr->jrh_entries[i];
575 ent->je_joffset = le64dec(data);
576 data += 8;
577 ent->je_offset = le64dec(data);
578 data += 8;
579 ent->je_length = le64dec(data);
580 data += 8;
581 }
582 return (0);
583 }
584
585 /*
586 * Function reads metadata from a provider (via the given consumer), decodes
587 * it to the system's endianness and verifies its correctness.
588 */
589 static int
590 g_journal_metadata_read(struct g_consumer *cp, struct g_journal_metadata *md)
591 {
592 struct g_provider *pp;
593 u_char *buf;
594 int error;
595
596 g_topology_assert();
597
598 error = g_access(cp, 1, 0, 0);
599 if (error != 0)
600 return (error);
601 pp = cp->provider;
602 g_topology_unlock();
603 /* Metadata is stored in the last sector. */
604 buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
605 &error);
606 g_topology_lock();
607 g_access(cp, -1, 0, 0);
608 if (buf == NULL) {
609 GJ_DEBUG(1, "Cannot read metadata from %s (error=%d).",
610 cp->provider->name, error);
611 return (error);
612 }
613
614 /* Decode metadata. */
615 error = journal_metadata_decode(buf, md);
616 g_free(buf);
617 /* Is this a gjournal provider at all? */
618 if (strcmp(md->md_magic, G_JOURNAL_MAGIC) != 0)
619 return (EINVAL);
620 /*
621 * Are we able to handle this version of metadata?
622 * We only maintain backward compatibility.
623 */
624 if (md->md_version > G_JOURNAL_VERSION) {
625 GJ_DEBUG(0,
626 "Kernel module is too old to handle metadata from %s.",
627 cp->provider->name);
628 return (EINVAL);
629 }
630 /* Is checksum correct? */
631 if (error != 0) {
632 GJ_DEBUG(0, "MD5 metadata hash mismatch for provider %s.",
633 cp->provider->name);
634 return (error);
635 }
636 return (0);
637 }
638
639 /*
640 * Two functions below are responsible for updating metadata.
641 * Only metadata on the data provider is updated (we need to update
642 * information about the active journal in there).
643 */
644 static void
645 g_journal_metadata_done(struct bio *bp)
646 {
647
648 /*
649 * There is not much we can do on error except report it.
650 */
651 if (bp->bio_error != 0) {
652 GJ_LOGREQ(0, bp, "Cannot update metadata (error=%d).",
653 bp->bio_error);
654 } else {
655 GJ_LOGREQ(2, bp, "Metadata updated.");
656 }
657 gj_free(bp->bio_data, bp->bio_length);
658 g_destroy_bio(bp);
659 }
660
661 static void
662 g_journal_metadata_update(struct g_journal_softc *sc)
663 {
664 struct g_journal_metadata md;
665 struct g_consumer *cp;
666 struct bio *bp;
667 u_char *sector;
668
669 cp = sc->sc_dconsumer;
670 sector = gj_malloc(cp->provider->sectorsize, M_WAITOK);
671 strlcpy(md.md_magic, G_JOURNAL_MAGIC, sizeof(md.md_magic));
672 md.md_version = G_JOURNAL_VERSION;
673 md.md_id = sc->sc_id;
674 md.md_type = sc->sc_orig_type;
675 md.md_jstart = sc->sc_jstart;
676 md.md_jend = sc->sc_jend;
677 md.md_joffset = sc->sc_inactive.jj_offset;
678 md.md_jid = sc->sc_journal_previous_id;
679 md.md_flags = 0;
680 if (sc->sc_flags & GJF_DEVICE_CLEAN)
681 md.md_flags |= GJ_FLAG_CLEAN;
682
683 if (sc->sc_flags & GJF_DEVICE_HARDCODED)
684 strlcpy(md.md_provider, sc->sc_name, sizeof(md.md_provider));
685 else
686 bzero(md.md_provider, sizeof(md.md_provider));
687 md.md_provsize = cp->provider->mediasize;
688 journal_metadata_encode(&md, sector);
689
690 /*
691 * Flush the cache, so we know all data is on disk.
692 * We write information here like "journal is consistent", so we need
693 * to be sure it is. Without BIO_FLUSH here, we can end up in a situation
694 * where the metadata is stored on disk, but not all of the data.
695 */
696 g_journal_flush_cache(sc);
697
698 bp = g_alloc_bio();
699 bp->bio_offset = cp->provider->mediasize - cp->provider->sectorsize;
700 bp->bio_length = cp->provider->sectorsize;
701 bp->bio_data = sector;
702 bp->bio_cmd = BIO_WRITE;
703 if (!(sc->sc_flags & GJF_DEVICE_DESTROY)) {
704 bp->bio_done = g_journal_metadata_done;
705 g_io_request(bp, cp);
706 } else {
707 bp->bio_done = NULL;
708 g_io_request(bp, cp);
709 biowait(bp, "gjmdu");
710 g_journal_metadata_done(bp);
711 }
712
713 /*
714 * Be sure metadata reached the disk.
715 */
716 g_journal_flush_cache(sc);
717 }
718
719 /*
720 * This is where I/O requests arrive from GEOM.
721 */
722 static void
723 g_journal_start(struct bio *bp)
724 {
725 struct g_journal_softc *sc;
726
727 sc = bp->bio_to->geom->softc;
728 GJ_LOGREQ(3, bp, "Request received.");
729
730 switch (bp->bio_cmd) {
731 case BIO_READ:
732 case BIO_WRITE:
733 mtx_lock(&sc->sc_mtx);
734 bioq_insert_tail(&sc->sc_regular_queue, bp);
735 wakeup(sc);
736 mtx_unlock(&sc->sc_mtx);
737 return;
738 case BIO_GETATTR:
739 if (strcmp(bp->bio_attribute, "GJOURNAL::provider") == 0) {
740 strlcpy(bp->bio_data, bp->bio_to->name, bp->bio_length);
741 bp->bio_completed = strlen(bp->bio_to->name) + 1;
742 g_io_deliver(bp, 0);
743 return;
744 }
745 /* FALLTHROUGH */
746 case BIO_DELETE:
747 default:
748 g_io_deliver(bp, EOPNOTSUPP);
749 return;
750 }
751 }
752
753 static void
754 g_journal_std_done(struct bio *bp)
755 {
756 struct g_journal_softc *sc;
757
758 sc = bp->bio_from->geom->softc;
759 mtx_lock(&sc->sc_mtx);
760 bioq_insert_tail(&sc->sc_back_queue, bp);
761 wakeup(sc);
762 mtx_unlock(&sc->sc_mtx);
763 }
764
765 static struct bio *
766 g_journal_new_bio(off_t start, off_t end, off_t joffset, u_char *data,
767 int flags)
768 {
769 struct bio *bp;
770
771 bp = g_alloc_bio();
772 bp->bio_offset = start;
773 bp->bio_joffset = joffset;
774 bp->bio_length = end - start;
775 bp->bio_cmd = BIO_WRITE;
776 bp->bio_done = g_journal_std_done;
777 if (data == NULL)
778 bp->bio_data = NULL;
779 else {
780 bp->bio_data = gj_malloc(bp->bio_length, flags);
781 if (bp->bio_data != NULL)
782 bcopy(data, bp->bio_data, bp->bio_length);
783 }
784 return (bp);
785 }
786
787 #define g_journal_insert_bio(head, bp, flags) \
788 g_journal_insert((head), (bp)->bio_offset, \
789 (bp)->bio_offset + (bp)->bio_length, (bp)->bio_joffset, \
790 (bp)->bio_data, flags)
791 /*
792 * The function below does a lot more than just insert a bio into the queue.
793 * It keeps the queue sorted by offset and ensures that there is no duplicate
794 * data (it combines bios where ranges overlap).
795 *
796 * The function returns the number of bios inserted (as a bio can be split).
797 */
798 static int
799 g_journal_insert(struct bio **head, off_t nstart, off_t nend, off_t joffset,
800 u_char *data, int flags)
801 {
802 struct bio *nbp, *cbp, *pbp;
803 off_t cstart, cend;
804 u_char *tmpdata;
805 int n;
806
807 GJ_DEBUG(3, "INSERT(%p): (%jd, %jd, %jd)", *head, nstart, nend,
808 joffset);
809 n = 0;
810 pbp = NULL;
811 GJQ_FOREACH(*head, cbp) {
812 cstart = cbp->bio_offset;
813 cend = cbp->bio_offset + cbp->bio_length;
814
815 if (nstart >= cend) {
816 /*
817 * +-------------+
818 * | |
819 * | current | +-------------+
820 * | bio | | |
821 * | | | new |
822 * +-------------+ | bio |
823 * | |
824 * +-------------+
825 */
826 GJ_DEBUG(3, "INSERT(%p): 1", *head);
827 } else if (nend <= cstart) {
828 /*
829 * +-------------+
830 * | |
831 * +-------------+ | current |
832 * | | | bio |
833 * | new | | |
834 * | bio | +-------------+
835 * | |
836 * +-------------+
837 */
838 nbp = g_journal_new_bio(nstart, nend, joffset, data,
839 flags);
840 if (pbp == NULL)
841 *head = nbp;
842 else
843 pbp->bio_next = nbp;
844 nbp->bio_next = cbp;
845 n++;
846 GJ_DEBUG(3, "INSERT(%p): 2 (nbp=%p pbp=%p)", *head, nbp,
847 pbp);
848 goto end;
849 } else if (nstart <= cstart && nend >= cend) {
850 /*
851 * +-------------+ +-------------+
852 * | current bio | | current bio |
853 * +---+-------------+---+ +-------------+---+
854 * | | | | | | |
855 * | | | | | | |
856 * | +-------------+ | +-------------+ |
857 * | new bio | | new bio |
858 * +---------------------+ +-----------------+
859 *
860 * +-------------+ +-------------+
861 * | current bio | | current bio |
862 * +---+-------------+ +-------------+
863 * | | | | |
864 * | | | | |
865 * | +-------------+ +-------------+
866 * | new bio | | new bio |
867 * +-----------------+ +-------------+
868 */
869 g_journal_stats_bytes_skipped += cbp->bio_length;
870 cbp->bio_offset = nstart;
871 cbp->bio_joffset = joffset;
872 cbp->bio_length = cend - nstart;
873 if (cbp->bio_data != NULL) {
874 gj_free(cbp->bio_data, cend - cstart);
875 cbp->bio_data = NULL;
876 }
877 if (data != NULL) {
878 cbp->bio_data = gj_malloc(cbp->bio_length,
879 flags);
880 if (cbp->bio_data != NULL) {
881 bcopy(data, cbp->bio_data,
882 cbp->bio_length);
883 }
884 data += cend - nstart;
885 }
886 joffset += cend - nstart;
887 nstart = cend;
888 GJ_DEBUG(3, "INSERT(%p): 3 (cbp=%p)", *head, cbp);
889 } else if (nstart > cstart && nend >= cend) {
890 /*
891 * +-----------------+ +-------------+
892 * | current bio | | current bio |
893 * | +-------------+ | +---------+---+
894 * | | | | | | |
895 * | | | | | | |
896 * +---+-------------+ +---+---------+ |
897 * | new bio | | new bio |
898 * +-------------+ +-------------+
899 */
900 g_journal_stats_bytes_skipped += cend - nstart;
901 nbp = g_journal_new_bio(nstart, cend, joffset, data,
902 flags);
903 nbp->bio_next = cbp->bio_next;
904 cbp->bio_next = nbp;
905 cbp->bio_length = nstart - cstart;
906 if (cbp->bio_data != NULL) {
907 cbp->bio_data = gj_realloc(cbp->bio_data,
908 cbp->bio_length, cend - cstart);
909 }
910 if (data != NULL)
911 data += cend - nstart;
912 joffset += cend - nstart;
913 nstart = cend;
914 n++;
915 GJ_DEBUG(3, "INSERT(%p): 4 (cbp=%p)", *head, cbp);
916 } else if (nstart > cstart && nend < cend) {
917 /*
918 * +---------------------+
919 * | current bio |
920 * | +-------------+ |
921 * | | | |
922 * | | | |
923 * +---+-------------+---+
924 * | new bio |
925 * +-------------+
926 */
927 g_journal_stats_bytes_skipped += nend - nstart;
928 nbp = g_journal_new_bio(nstart, nend, joffset, data,
929 flags);
930 nbp->bio_next = cbp->bio_next;
931 cbp->bio_next = nbp;
932 if (cbp->bio_data == NULL)
933 tmpdata = NULL;
934 else
935 tmpdata = cbp->bio_data + nend - cstart;
936 nbp = g_journal_new_bio(nend, cend,
937 cbp->bio_joffset + nend - cstart, tmpdata, flags);
938 nbp->bio_next = ((struct bio *)cbp->bio_next)->bio_next;
939 ((struct bio *)cbp->bio_next)->bio_next = nbp;
940 cbp->bio_length = nstart - cstart;
941 if (cbp->bio_data != NULL) {
942 cbp->bio_data = gj_realloc(cbp->bio_data,
943 cbp->bio_length, cend - cstart);
944 }
945 n += 2;
946 GJ_DEBUG(3, "INSERT(%p): 5 (cbp=%p)", *head, cbp);
947 goto end;
948 } else if (nstart <= cstart && nend < cend) {
949 /*
950 * +-----------------+ +-------------+
951 * | current bio | | current bio |
952 * +-------------+ | +---+---------+ |
953 * | | | | | | |
954 * | | | | | | |
955 * +-------------+---+ | +---------+---+
956 * | new bio | | new bio |
957 * +-------------+ +-------------+
958 */
959 g_journal_stats_bytes_skipped += nend - nstart;
960 nbp = g_journal_new_bio(nstart, nend, joffset, data,
961 flags);
962 if (pbp == NULL)
963 *head = nbp;
964 else
965 pbp->bio_next = nbp;
966 nbp->bio_next = cbp;
967 cbp->bio_offset = nend;
968 cbp->bio_length = cend - nend;
969 cbp->bio_joffset += nend - cstart;
970 tmpdata = cbp->bio_data;
971 if (tmpdata != NULL) {
972 cbp->bio_data = gj_malloc(cbp->bio_length,
973 flags);
974 if (cbp->bio_data != NULL) {
975 bcopy(tmpdata + nend - cstart,
976 cbp->bio_data, cbp->bio_length);
977 }
978 gj_free(tmpdata, cend - cstart);
979 }
980 n++;
981 GJ_DEBUG(3, "INSERT(%p): 6 (cbp=%p)", *head, cbp);
982 goto end;
983 }
984 if (nstart == nend)
985 goto end;
986 pbp = cbp;
987 }
988 nbp = g_journal_new_bio(nstart, nend, joffset, data, flags);
989 if (pbp == NULL)
990 *head = nbp;
991 else
992 pbp->bio_next = nbp;
993 nbp->bio_next = NULL;
994 n++;
995 GJ_DEBUG(3, "INSERT(%p): 8 (nbp=%p pbp=%p)", *head, nbp, pbp);
996 end:
997 if (g_journal_debug >= 3) {
998 GJQ_FOREACH(*head, cbp) {
999 GJ_DEBUG(3, "ELEMENT: %p (%jd, %jd, %jd, %p)", cbp,
1000 (intmax_t)cbp->bio_offset,
1001 (intmax_t)cbp->bio_length,
1002 (intmax_t)cbp->bio_joffset, cbp->bio_data);
1003 }
1004 GJ_DEBUG(3, "INSERT(%p): DONE %d", *head, n);
1005 }
1006 return (n);
1007 }
1008
1009 /*
1010 * The function combines neighbouring bios, trying to squeeze as much data as
1011 * possible into one bio.
1012 *
1013 * The function returns the number of bios combined (as a negative value).
1014 */
1015 static int
1016 g_journal_optimize(struct bio *head)
1017 {
1018 struct bio *cbp, *pbp;
1019 int n;
1020
1021 n = 0;
1022 pbp = NULL;
1023 GJQ_FOREACH(head, cbp) {
1024 /* Skip bios which have to be read first. */
1025 if (cbp->bio_data == NULL) {
1026 pbp = NULL;
1027 continue;
1028 }
1029 /* There is no previous bio yet. */
1030 if (pbp == NULL) {
1031 pbp = cbp;
1032 continue;
1033 }
1034 /* Is this a neighbouring bio? */
1035 if (pbp->bio_offset + pbp->bio_length != cbp->bio_offset) {
1036 /* Be sure that the bio queue is sorted. */
1037 KASSERT(pbp->bio_offset + pbp->bio_length < cbp->bio_offset,
1038 ("poffset=%jd plength=%jd coffset=%jd",
1039 (intmax_t)pbp->bio_offset,
1040 (intmax_t)pbp->bio_length,
1041 (intmax_t)cbp->bio_offset));
1042 pbp = cbp;
1043 continue;
1044 }
1045 /* Be sure we don't end up with a bio that is too big. */
1046 if (pbp->bio_length + cbp->bio_length > MAXPHYS) {
1047 pbp = cbp;
1048 continue;
1049 }
1050 /* Ok, we can join bios. */
1051 GJ_LOGREQ(4, pbp, "Join: ");
1052 GJ_LOGREQ(4, cbp, "and: ");
1053 pbp->bio_data = gj_realloc(pbp->bio_data,
1054 pbp->bio_length + cbp->bio_length, pbp->bio_length);
1055 bcopy(cbp->bio_data, pbp->bio_data + pbp->bio_length,
1056 cbp->bio_length);
1057 gj_free(cbp->bio_data, cbp->bio_length);
1058 pbp->bio_length += cbp->bio_length;
1059 pbp->bio_next = cbp->bio_next;
1060 g_destroy_bio(cbp);
1061 cbp = pbp;
1062 g_journal_stats_combined_ios++;
1063 n--;
1064 GJ_LOGREQ(4, pbp, "Got: ");
1065 }
1066 return (n);
1067 }
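/*
 * Since the return value is negative, callers simply add it to their
 * in-progress counters, e.g.:
 *	sc->sc_copy_in_progress += g_journal_optimize(bioq);
 */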
1068
1069 /*
1070 * TODO: Update comment.
1071 * These are functions responsible for copying one portion of data from the
1072 * journal to the destination provider.
1073 * The order goes like this:
1074 * 1. Read the header, which contains information about the data blocks
1075 * following it.
1076 * 2. Read the data blocks from the journal.
1077 * 3. Write the data blocks on the data provider.
1078 *
1079 * g_journal_copy_start()
1080 * g_journal_copy_write_done() - handles a finished write request, logging potential errors.
1081 */
1082
1083 /*
1084 * When there is no data in cache, this function is used to read it.
1085 */
1086 static void
1087 g_journal_read_first(struct g_journal_softc *sc, struct bio *bp)
1088 {
1089 struct bio *cbp;
1090
1091 /*
1092 * We were short on memory, so the data was freed.
1093 * In that case we need to read it back from the journal.
1094 */
1095 cbp = g_alloc_bio();
1096 cbp->bio_cflags = bp->bio_cflags;
1097 cbp->bio_parent = bp;
1098 cbp->bio_offset = bp->bio_joffset;
1099 cbp->bio_length = bp->bio_length;
1100 cbp->bio_data = gj_malloc(bp->bio_length, M_WAITOK);
1101 cbp->bio_cmd = BIO_READ;
1102 cbp->bio_done = g_journal_std_done;
1103 GJ_LOGREQ(4, cbp, "READ FIRST");
1104 g_io_request(cbp, sc->sc_jconsumer);
1105 g_journal_cache_misses++;
1106 }
1107
1108 static void
1109 g_journal_copy_send(struct g_journal_softc *sc)
1110 {
1111 struct bio *bioq, *bp, *lbp;
1112
1113 bioq = lbp = NULL;
1114 mtx_lock(&sc->sc_mtx);
1115 for (; sc->sc_copy_in_progress < g_journal_parallel_copies;) {
1116 bp = GJQ_FIRST(sc->sc_inactive.jj_queue);
1117 if (bp == NULL)
1118 break;
1119 GJQ_REMOVE(sc->sc_inactive.jj_queue, bp);
1120 sc->sc_copy_in_progress++;
1121 GJQ_INSERT_AFTER(bioq, bp, lbp);
1122 lbp = bp;
1123 }
1124 mtx_unlock(&sc->sc_mtx);
1125 if (g_journal_do_optimize)
1126 sc->sc_copy_in_progress += g_journal_optimize(bioq);
1127 while ((bp = GJQ_FIRST(bioq)) != NULL) {
1128 GJQ_REMOVE(bioq, bp);
1129 GJQ_INSERT_HEAD(sc->sc_copy_queue, bp);
1130 bp->bio_cflags = GJ_BIO_COPY;
1131 if (bp->bio_data == NULL)
1132 g_journal_read_first(sc, bp);
1133 else {
1134 bp->bio_joffset = 0;
1135 GJ_LOGREQ(4, bp, "SEND");
1136 g_io_request(bp, sc->sc_dconsumer);
1137 }
1138 }
1139 }
1140
1141 static void
1142 g_journal_copy_start(struct g_journal_softc *sc)
1143 {
1144
1145 /*
1146 * Remember in metadata that we're starting to copy journaled data
1147 * to the data provider.
1148 * In case of power failure, we will copy this data once again on boot.
1149 */
1150 if (!sc->sc_journal_copying) {
1151 sc->sc_journal_copying = 1;
1152 GJ_DEBUG(1, "Starting copy of journal.");
1153 g_journal_metadata_update(sc);
1154 }
1155 g_journal_copy_send(sc);
1156 }
1157
1158 /*
1159 * Data block has been read from the journal provider.
1160 */
1161 static int
1162 g_journal_copy_read_done(struct bio *bp)
1163 {
1164 struct g_journal_softc *sc;
1165 struct g_consumer *cp;
1166 struct bio *pbp;
1167
1168 KASSERT(bp->bio_cflags == GJ_BIO_COPY,
1169 ("Invalid bio (%d != %d).", bp->bio_cflags, GJ_BIO_COPY));
1170
1171 sc = bp->bio_from->geom->softc;
1172 pbp = bp->bio_parent;
1173
1174 if (bp->bio_error != 0) {
1175 GJ_DEBUG(0, "Error while reading data from %s (error=%d).",
1176 bp->bio_to->name, bp->bio_error);
1177 /*
1178 * We will not be able to deliver WRITE request as well.
1179 */
1180 gj_free(bp->bio_data, bp->bio_length);
1181 g_destroy_bio(pbp);
1182 g_destroy_bio(bp);
1183 sc->sc_copy_in_progress--;
1184 return (1);
1185 }
1186 pbp->bio_data = bp->bio_data;
1187 cp = sc->sc_dconsumer;
1188 g_io_request(pbp, cp);
1189 GJ_LOGREQ(4, bp, "READ DONE");
1190 g_destroy_bio(bp);
1191 return (0);
1192 }
1193
1194 /*
1195 * Data block has been written to the data provider.
1196 */
1197 static void
1198 g_journal_copy_write_done(struct bio *bp)
1199 {
1200 struct g_journal_softc *sc;
1201
1202 KASSERT(bp->bio_cflags == GJ_BIO_COPY,
1203 ("Invalid bio (%d != %d).", bp->bio_cflags, GJ_BIO_COPY));
1204
1205 sc = bp->bio_from->geom->softc;
1206 sc->sc_copy_in_progress--;
1207
1208 if (bp->bio_error != 0) {
1209 GJ_LOGREQ(0, bp, "[copy] Error while writing data (error=%d)",
1210 bp->bio_error);
1211 }
1212 GJQ_REMOVE(sc->sc_copy_queue, bp);
1213 gj_free(bp->bio_data, bp->bio_length);
1214 GJ_LOGREQ(4, bp, "DONE");
1215 g_destroy_bio(bp);
1216
1217 if (sc->sc_copy_in_progress == 0) {
1218 /*
1219 * This was the last write request for this journal.
1220 */
1221 GJ_DEBUG(1, "Data has been copied.");
1222 sc->sc_journal_copying = 0;
1223 }
1224 }
1225
1226 static void g_journal_flush_done(struct bio *bp);
1227
1228 /*
1229 * Flush one record onto active journal provider.
1230 */
1231 static void
1232 g_journal_flush(struct g_journal_softc *sc)
1233 {
1234 struct g_journal_record_header hdr;
1235 struct g_journal_entry *ent;
1236 struct g_provider *pp;
1237 struct bio **bioq;
1238 struct bio *bp, *fbp, *pbp;
1239 off_t joffset, size;
1240 u_char *data, hash[16];
1241 MD5_CTX ctx;
1242 u_int i;
1243
1244 if (sc->sc_current_count == 0)
1245 return;
1246
1247 size = 0;
1248 pp = sc->sc_jprovider;
1249 GJ_VALIDATE_OFFSET(sc->sc_journal_offset, sc);
1250 joffset = sc->sc_journal_offset;
1251
1252 GJ_DEBUG(2, "Storing %d journal entries on %s at %jd.",
1253 sc->sc_current_count, pp->name, (intmax_t)joffset);
1254
1255 /*
1256 * Store 'journal id', so we know to which journal this record belongs.
1257 */
1258 hdr.jrh_journal_id = sc->sc_journal_id;
1259 /* Could be less than g_journal_record_entries if called due to a timeout. */
1260 hdr.jrh_nentries = MIN(sc->sc_current_count, g_journal_record_entries);
1261 strlcpy(hdr.jrh_magic, GJ_RECORD_HEADER_MAGIC, sizeof(hdr.jrh_magic));
1262
1263 bioq = &sc->sc_active.jj_queue;
1264 pbp = sc->sc_flush_queue;
1265
1266 fbp = g_alloc_bio();
1267 fbp->bio_parent = NULL;
1268 fbp->bio_cflags = GJ_BIO_JOURNAL;
1269 fbp->bio_offset = -1;
1270 fbp->bio_joffset = joffset;
1271 fbp->bio_length = pp->sectorsize;
1272 fbp->bio_cmd = BIO_WRITE;
1273 fbp->bio_done = g_journal_std_done;
1274 GJQ_INSERT_AFTER(sc->sc_flush_queue, fbp, pbp);
1275 pbp = fbp;
1276 fbp->bio_to = pp;
1277 GJ_LOGREQ(4, fbp, "FLUSH_OUT");
1278 joffset += pp->sectorsize;
1279 sc->sc_flush_count++;
1280 if (sc->sc_flags & GJF_DEVICE_CHECKSUM)
1281 MD5Init(&ctx);
1282
1283 for (i = 0; i < hdr.jrh_nentries; i++) {
1284 bp = sc->sc_current_queue;
1285 KASSERT(bp != NULL, ("NULL bp"));
1286 bp->bio_to = pp;
1287 GJ_LOGREQ(4, bp, "FLUSHED");
1288 sc->sc_current_queue = bp->bio_next;
1289 bp->bio_next = NULL;
1290 sc->sc_current_count--;
1291
1292 /* Add to the header. */
1293 ent = &hdr.jrh_entries[i];
1294 ent->je_offset = bp->bio_offset;
1295 ent->je_joffset = joffset;
1296 ent->je_length = bp->bio_length;
1297 size += ent->je_length;
1298
1299 data = bp->bio_data;
1300 if (sc->sc_flags & GJF_DEVICE_CHECKSUM)
1301 MD5Update(&ctx, data, ent->je_length);
1302 bzero(bp, sizeof(*bp));
1303 bp->bio_cflags = GJ_BIO_JOURNAL;
1304 bp->bio_offset = ent->je_offset;
1305 bp->bio_joffset = ent->je_joffset;
1306 bp->bio_length = ent->je_length;
1307 bp->bio_data = data;
1308 bp->bio_cmd = BIO_WRITE;
1309 bp->bio_done = g_journal_std_done;
1310 GJQ_INSERT_AFTER(sc->sc_flush_queue, bp, pbp);
1311 pbp = bp;
1312 bp->bio_to = pp;
1313 GJ_LOGREQ(4, bp, "FLUSH_OUT");
1314 joffset += bp->bio_length;
1315 sc->sc_flush_count++;
1316
1317 /*
1318 * Add the request to the active journal's queue (sc_active.jj_queue).
1319 * This is our cache. After a journal switch we don't have to
1320 * read the data from the inactive journal, because we keep
1321 * it in memory.
1322 */
1323 g_journal_insert(bioq, ent->je_offset,
1324 ent->je_offset + ent->je_length, ent->je_joffset, data,
1325 M_NOWAIT);
1326 }
1327
1328 /*
1329 * After all requests, store a valid header.
1330 */
1331 data = gj_malloc(pp->sectorsize, M_WAITOK);
1332 if (sc->sc_flags & GJF_DEVICE_CHECKSUM) {
1333 MD5Final(hash, &ctx);
1334 bcopy(hash, hdr.jrh_sum, sizeof(hdr.jrh_sum));
1335 }
1336 g_journal_record_header_encode(&hdr, data);
1337 fbp->bio_data = data;
1338
1339 sc->sc_journal_offset = joffset;
1340
1341 g_journal_check_overflow(sc);
1342 }
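/*
 * Note that in g_journal_flush() above the record header bio (fbp) is queued
 * first, but its data is only attached at the very end, once the MD5 sum
 * over all entry data is known.
 */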
1343
1344 /*
1345 * Flush request finished.
1346 */
1347 static void
1348 g_journal_flush_done(struct bio *bp)
1349 {
1350 struct g_journal_softc *sc;
1351 struct g_consumer *cp;
1352
1353 KASSERT((bp->bio_cflags & GJ_BIO_MASK) == GJ_BIO_JOURNAL,
1354 ("Invalid bio (%d != %d).", bp->bio_cflags, GJ_BIO_JOURNAL));
1355
1356 cp = bp->bio_from;
1357 sc = cp->geom->softc;
1358 sc->sc_flush_in_progress--;
1359
1360 if (bp->bio_error != 0) {
1361 GJ_LOGREQ(0, bp, "[flush] Error while writing data (error=%d)",
1362 bp->bio_error);
1363 }
1364 gj_free(bp->bio_data, bp->bio_length);
1365 GJ_LOGREQ(4, bp, "DONE");
1366 g_destroy_bio(bp);
1367 }
1368
1369 static void g_journal_release_delayed(struct g_journal_softc *sc);
1370
1371 static void
1372 g_journal_flush_send(struct g_journal_softc *sc)
1373 {
1374 struct g_consumer *cp;
1375 struct bio *bioq, *bp, *lbp;
1376
1377 cp = sc->sc_jconsumer;
1378 bioq = lbp = NULL;
1379 while (sc->sc_flush_in_progress < g_journal_parallel_flushes) {
1380 /* Send one flush request to the active journal. */
1381 bp = GJQ_FIRST(sc->sc_flush_queue);
1382 if (bp != NULL) {
1383 GJQ_REMOVE(sc->sc_flush_queue, bp);
1384 sc->sc_flush_count--;
1385 bp->bio_offset = bp->bio_joffset;
1386 bp->bio_joffset = 0;
1387 sc->sc_flush_in_progress++;
1388 GJQ_INSERT_AFTER(bioq, bp, lbp);
1389 lbp = bp;
1390 }
1391 /* Try to release delayed requests. */
1392 g_journal_release_delayed(sc);
1393 /* If there are no requests to flush, leave. */
1394 if (GJQ_FIRST(sc->sc_flush_queue) == NULL)
1395 break;
1396 }
1397 if (g_journal_do_optimize)
1398 sc->sc_flush_in_progress += g_journal_optimize(bioq);
1399 while ((bp = GJQ_FIRST(bioq)) != NULL) {
1400 GJQ_REMOVE(bioq, bp);
1401 GJ_LOGREQ(3, bp, "Flush request send");
1402 g_io_request(bp, cp);
1403 }
1404 }
1405
1406 static void
1407 g_journal_add_current(struct g_journal_softc *sc, struct bio *bp)
1408 {
1409 int n;
1410
1411 GJ_LOGREQ(4, bp, "CURRENT %d", sc->sc_current_count);
1412 n = g_journal_insert_bio(&sc->sc_current_queue, bp, M_WAITOK);
1413 sc->sc_current_count += n;
1414 n = g_journal_optimize(sc->sc_current_queue);
1415 sc->sc_current_count += n;
1416 /*
1417 * For requests which are added to the current queue we deliver
1418 * the response immediately.
1419 */
1420 bp->bio_completed = bp->bio_length;
1421 g_io_deliver(bp, 0);
1422 if (sc->sc_current_count >= g_journal_record_entries) {
1423 /*
1424 * Let's flush one record onto active journal provider.
1425 */
1426 g_journal_flush(sc);
1427 }
1428 }
1429
1430 static void
1431 g_journal_release_delayed(struct g_journal_softc *sc)
1432 {
1433 struct bio *bp;
1434
1435 for (;;) {
1436 /* The flush queue is full, exit. */
1437 if (sc->sc_flush_count >= g_journal_accept_immediately)
1438 return;
1439 bp = bioq_takefirst(&sc->sc_delayed_queue);
1440 if (bp == NULL)
1441 return;
1442 sc->sc_delayed_count--;
1443 g_journal_add_current(sc, bp);
1444 }
1445 }
1446
1447 /*
1448 * Add I/O request to the current queue. If we have enough requests for one
1449 * journal record, we flush them onto the active journal provider.
1450 */
1451 static void
1452 g_journal_add_request(struct g_journal_softc *sc, struct bio *bp)
1453 {
1454
1455 /*
1456 * The flush queue is full, we need to delay the request.
1457 */
1458 if (sc->sc_delayed_count > 0 ||
1459 sc->sc_flush_count >= g_journal_accept_immediately) {
1460 GJ_LOGREQ(4, bp, "DELAYED");
1461 bioq_insert_tail(&sc->sc_delayed_queue, bp);
1462 sc->sc_delayed_count++;
1463 return;
1464 }
1465
1466 KASSERT(TAILQ_EMPTY(&sc->sc_delayed_queue.queue),
1467 ("DELAYED queue not empty."));
1468 g_journal_add_current(sc, bp);
1469 }
1470
1471 static void g_journal_read_done(struct bio *bp);
1472
1473 /*
1474 * Try to find requested data in cache.
1475 */
1476 static struct bio *
1477 g_journal_read_find(struct bio *head, int sorted, struct bio *pbp, off_t ostart,
1478 off_t oend)
1479 {
1480 off_t cstart, cend;
1481 struct bio *bp;
1482
1483 GJQ_FOREACH(head, bp) {
1484 if (bp->bio_offset == -1)
1485 continue;
1486 cstart = MAX(ostart, bp->bio_offset);
1487 cend = MIN(oend, bp->bio_offset + bp->bio_length);
1488 if (cend <= ostart)
1489 continue;
1490 else if (cstart >= oend) {
1491 if (!sorted)
1492 continue;
1493 else {
1494 bp = NULL;
1495 break;
1496 }
1497 }
1498 if (bp->bio_data == NULL)
1499 break;
1500 GJ_DEBUG(3, "READ(%p): (%jd, %jd) (bp=%p)", head, cstart, cend,
1501 bp);
1502 bcopy(bp->bio_data + cstart - bp->bio_offset,
1503 pbp->bio_data + cstart - pbp->bio_offset, cend - cstart);
1504 pbp->bio_completed += cend - cstart;
1505 if (pbp->bio_completed == pbp->bio_length) {
1506 /*
1507 * Cool, the whole request was in cache, deliver happy
1508 * message.
1509 */
1510 g_io_deliver(pbp, 0);
1511 return (pbp);
1512 }
1513 break;
1514 }
1515 return (bp);
1516 }
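/*
 * g_journal_read_find() returns pbp when the whole request was satisfied
 * from the cache (and already delivered), NULL when no overlapping bio was
 * found, or the bio covering part of the requested range.
 */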
1517
1518 /*
1519 * Try to find requested data in cache.
1520 */
1521 static struct bio *
1522 g_journal_read_queue_find(struct bio_queue *head, struct bio *pbp, off_t ostart,
1523 off_t oend)
1524 {
1525 off_t cstart, cend;
1526 struct bio *bp;
1527
1528 TAILQ_FOREACH(bp, head, bio_queue) {
1529 cstart = MAX(ostart, bp->bio_offset);
1530 cend = MIN(oend, bp->bio_offset + bp->bio_length);
1531 if (cend <= ostart)
1532 continue;
1533 else if (cstart >= oend)
1534 continue;
1535 KASSERT(bp->bio_data != NULL,
1536 ("%s: bio_data == NULL", __func__));
1537 GJ_DEBUG(3, "READ(%p): (%jd, %jd) (bp=%p)", head, cstart, cend,
1538 bp);
1539 bcopy(bp->bio_data + cstart - bp->bio_offset,
1540 pbp->bio_data + cstart - pbp->bio_offset, cend - cstart);
1541 pbp->bio_completed += cend - cstart;
1542 if (pbp->bio_completed == pbp->bio_length) {
1543 /*
1544 * Cool, the whole request was in cache, deliver happy
1545 * message.
1546 */
1547 g_io_deliver(pbp, 0);
1548 return (pbp);
1549 }
1550 break;
1551 }
1552 return (bp);
1553 }
1554
1555 /*
1556 * This function is used for collecting data on read.
1557 * The complexity is because parts of the data can be stored in six different
1558 * places:
1559 * - in delayed requests
1560 * - in memory - the data not yet sent to the active journal provider
1561 * - in requests which are going to be sent to the active journal
1562 * - in the active journal
1563 * - in the inactive journal
1564 * - in the data provider
1565 */
1566 static void
1567 g_journal_read(struct g_journal_softc *sc, struct bio *pbp, off_t ostart,
1568 off_t oend)
1569 {
1570 struct bio *bp, *nbp, *head;
1571 off_t cstart, cend;
1572 u_int i, sorted = 0;
1573
1574 GJ_DEBUG(3, "READ: (%jd, %jd)", ostart, oend);
1575
1576 cstart = cend = -1;
1577 bp = NULL;
1578 head = NULL;
1579 for (i = 0; i <= 5; i++) {
1580 switch (i) {
1581 case 0: /* Delayed requests. */
1582 head = NULL;
1583 sorted = 0;
1584 break;
1585 case 1: /* Not-yet-sent data. */
1586 head = sc->sc_current_queue;
1587 sorted = 1;
1588 break;
1589 case 2: /* In-flight to the active journal. */
1590 head = sc->sc_flush_queue;
1591 sorted = 0;
1592 break;
1593 case 3: /* Active journal. */
1594 head = sc->sc_active.jj_queue;
1595 sorted = 1;
1596 break;
1597 case 4: /* Inactive journal. */
1598 /*
1599 * XXX: Here could be a race with g_journal_lowmem().
1600 */
1601 head = sc->sc_inactive.jj_queue;
1602 sorted = 1;
1603 break;
1604 case 5: /* In-flight to the data provider. */
1605 head = sc->sc_copy_queue;
1606 sorted = 0;
1607 break;
1608 default:
1609 panic("gjournal %s: i=%d", __func__, i);
1610 }
1611 if (i == 0)
1612 bp = g_journal_read_queue_find(&sc->sc_delayed_queue.queue, pbp, ostart, oend);
1613 else
1614 bp = g_journal_read_find(head, sorted, pbp, ostart, oend);
1615 if (bp == pbp) { /* Got the whole request. */
1616 GJ_DEBUG(2, "Got the whole request from %u.", i);
1617 return;
1618 } else if (bp != NULL) {
1619 cstart = MAX(ostart, bp->bio_offset);
1620 cend = MIN(oend, bp->bio_offset + bp->bio_length);
1621 GJ_DEBUG(2, "Got part of the request from %u (%jd-%jd).",
1622 i, (intmax_t)cstart, (intmax_t)cend);
1623 break;
1624 }
1625 }
1626 if (bp != NULL) {
1627 if (bp->bio_data == NULL) {
1628 nbp = g_duplicate_bio(pbp);
1629 nbp->bio_cflags = GJ_BIO_READ;
1630 nbp->bio_data =
1631 pbp->bio_data + cstart - pbp->bio_offset;
1632 nbp->bio_offset =
1633 bp->bio_joffset + cstart - bp->bio_offset;
1634 nbp->bio_length = cend - cstart;
1635 nbp->bio_done = g_journal_read_done;
1636 g_io_request(nbp, sc->sc_jconsumer);
1637 }
1638 /*
1639 * If we don't have the whole request yet, call g_journal_read()
1640 * recursively.
1641 */
1642 if (ostart < cstart)
1643 g_journal_read(sc, pbp, ostart, cstart);
1644 if (oend > cend)
1645 g_journal_read(sc, pbp, cend, oend);
1646 } else {
1647 /*
1648 * No data in memory, no data in the journal.
1649 * It's time to ask the data provider.
1650 */
1651 GJ_DEBUG(3, "READ(data): (%jd, %jd)", ostart, oend);
1652 nbp = g_duplicate_bio(pbp);
1653 nbp->bio_cflags = GJ_BIO_READ;
1654 nbp->bio_data = pbp->bio_data + ostart - pbp->bio_offset;
1655 nbp->bio_offset = ostart;
1656 nbp->bio_length = oend - ostart;
1657 nbp->bio_done = g_journal_read_done;
1658 g_io_request(nbp, sc->sc_dconsumer);
1659 /* We have the whole request, return here. */
1660 return;
1661 }
1662 }
1663
1664 /*
1665 * Function responsible for handling finished READ requests.
1666 * Actually, g_std_done() could be used here; the only difference is that we
1667 * log the error.
1668 */
1669 static void
1670 g_journal_read_done(struct bio *bp)
1671 {
1672 struct bio *pbp;
1673
1674 KASSERT(bp->bio_cflags == GJ_BIO_READ,
1675 ("Invalid bio (%d != %d).", bp->bio_cflags, GJ_BIO_READ));
1676
1677 pbp = bp->bio_parent;
1678 pbp->bio_inbed++;
1679 pbp->bio_completed += bp->bio_length;
1680
1681 if (bp->bio_error != 0) {
1682 if (pbp->bio_error == 0)
1683 pbp->bio_error = bp->bio_error;
1684 GJ_DEBUG(0, "Error while reading data from %s (error=%d).",
1685 bp->bio_to->name, bp->bio_error);
1686 }
1687 g_destroy_bio(bp);
1688 if (pbp->bio_children == pbp->bio_inbed &&
1689 pbp->bio_completed == pbp->bio_length) {
1690 /* We're done. */
1691 g_io_deliver(pbp, 0);
1692 }
1693 }
1694
1695 /*
1696 * Deactivate the current journal and activate the next one.
1697 */
1698 static void
1699 g_journal_switch(struct g_journal_softc *sc)
1700 {
1701 struct g_provider *pp;
1702
1703 if (JEMPTY(sc)) {
1704 GJ_DEBUG(3, "No need for %s switch.", sc->sc_name);
1705 pp = LIST_FIRST(&sc->sc_geom->provider);
1706 if (!(sc->sc_flags & GJF_DEVICE_CLEAN) && pp->acw == 0) {
1707 sc->sc_flags |= GJF_DEVICE_CLEAN;
1708 GJ_DEBUG(1, "Marking %s as clean.", sc->sc_name);
1709 g_journal_metadata_update(sc);
1710 }
1711 } else {
1712 GJ_DEBUG(3, "Switching journal %s.", sc->sc_geom->name);
1713
1714 pp = sc->sc_jprovider;
1715
1716 sc->sc_journal_previous_id = sc->sc_journal_id;
1717
1718 sc->sc_journal_id = sc->sc_journal_next_id;
1719 sc->sc_journal_next_id = arc4random();
1720
1721 GJ_VALIDATE_OFFSET(sc->sc_journal_offset, sc);
1722
1723 g_journal_write_header(sc);
1724
1725 sc->sc_inactive.jj_offset = sc->sc_active.jj_offset;
1726 sc->sc_inactive.jj_queue = sc->sc_active.jj_queue;
1727
1728 sc->sc_active.jj_offset =
1729 sc->sc_journal_offset - pp->sectorsize;
1730 sc->sc_active.jj_queue = NULL;
1731
1732 /*
1733 * Switch is done, start copying data from the (now) inactive
1734 * journal to the data provider.
1735 */
1736 g_journal_copy_start(sc);
1737 }
1738 mtx_lock(&sc->sc_mtx);
1739 sc->sc_flags &= ~GJF_DEVICE_SWITCH;
1740 mtx_unlock(&sc->sc_mtx);
1741 }
1742
1743 static void
1744 g_journal_initialize(struct g_journal_softc *sc)
1745 {
1746
1747 sc->sc_journal_id = arc4random();
1748 sc->sc_journal_next_id = arc4random();
1749 sc->sc_journal_previous_id = sc->sc_journal_id;
1750 sc->sc_journal_offset = sc->sc_jstart;
1751 sc->sc_inactive.jj_offset = sc->sc_jstart;
1752 g_journal_write_header(sc);
1753 sc->sc_active.jj_offset = sc->sc_jstart;
1754 }
1755
1756 static void
1757 g_journal_mark_as_dirty(struct g_journal_softc *sc)
1758 {
1759 const struct g_journal_desc *desc;
1760 int i;
1761
1762 GJ_DEBUG(1, "Marking file system %s as dirty.", sc->sc_name);
1763 for (i = 0; (desc = g_journal_filesystems[i]) != NULL; i++)
1764 desc->jd_dirty(sc->sc_dconsumer);
1765 }
1766
1767 /*
1768 * Function reads a record header from the given journal.
1769 * It is very similar to g_read_data(9), but it doesn't allocate memory for the
1770 * bio and data on every call.
1771 */
1772 static int
1773 g_journal_sync_read(struct g_consumer *cp, struct bio *bp, off_t offset,
1774 void *data)
1775 {
1776 int error;
1777
1778 bzero(bp, sizeof(*bp));
1779 bp->bio_cmd = BIO_READ;
1780 bp->bio_done = NULL;
1781 bp->bio_offset = offset;
1782 bp->bio_length = cp->provider->sectorsize;
1783 bp->bio_data = data;
1784 g_io_request(bp, cp);
1785 error = biowait(bp, "gjs_read");
1786 return (error);
1787 }
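/*
 * The caller (g_journal_sync() below) allocates the bio and the sector-sized
 * buffer once and reuses them for every header read while walking the
 * journal.
 */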
1788
1789 #if 0
1790 /*
1791 * Function is called when we start the journal device and we detect that
1792 * one of the journals was not fully copied.
1793 * The purpose of this function is to read all record headers from the journal
1794 * and place them in the inactive queue, so we can start the journal
1795 * synchronization process and the journal provider itself.
1796 * The design decision was to not synchronize the whole journal here, as it
1797 * can take too much time. Reading only the headers and delaying the
1798 * synchronization until after the journal provider is started should be best.
1799 */
1800 #endif
1801
1802 static void
1803 g_journal_sync(struct g_journal_softc *sc)
1804 {
1805 struct g_journal_record_header rhdr;
1806 struct g_journal_entry *ent;
1807 struct g_journal_header jhdr;
1808 struct g_consumer *cp;
1809 struct bio *bp, *fbp, *tbp;
1810 off_t joffset, offset;
1811 u_char *buf, sum[16];
1812 uint64_t id;
1813 MD5_CTX ctx;
1814 int error, found, i;
1815
1816 found = 0;
1817 fbp = NULL;
1818 cp = sc->sc_jconsumer;
1819 bp = g_alloc_bio();
1820 buf = gj_malloc(cp->provider->sectorsize, M_WAITOK);
1821 offset = joffset = sc->sc_inactive.jj_offset = sc->sc_journal_offset;
1822
1823 GJ_DEBUG(2, "Looking for termination at %jd.", (intmax_t)joffset);
1824
1825 /*
1826 * Read and decode first journal header.
1827 */
1828 error = g_journal_sync_read(cp, bp, offset, buf);
1829 if (error != 0) {
1830 GJ_DEBUG(0, "Error while reading journal header from %s.",
1831 cp->provider->name);
1832 goto end;
1833 }
1834 error = g_journal_header_decode(buf, &jhdr);
1835 if (error != 0) {
1836 GJ_DEBUG(0, "Cannot decode journal header from %s.",
1837 cp->provider->name);
1838 goto end;
1839 }
1840 id = sc->sc_journal_id;
1841 if (jhdr.jh_journal_id != sc->sc_journal_id) {
1842 GJ_DEBUG(1, "Journal ID mismatch at %jd (0x%08x != 0x%08x).",
1843 (intmax_t)offset, (u_int)jhdr.jh_journal_id, (u_int)id);
1844 goto end;
1845 }
1846 offset += cp->provider->sectorsize;
1847 id = sc->sc_journal_next_id = jhdr.jh_journal_next_id;
1848
1849 for (;;) {
1850 /*
1851 * If the biggest record won't fit, look for a record header or
1852 * journal header from the beginning.
1853 */
1854 GJ_VALIDATE_OFFSET(offset, sc);
1855 error = g_journal_sync_read(cp, bp, offset, buf);
1856 if (error != 0) {
1857 /*
1858 * Not good. Having an error while reading a header
1859 * means that we cannot read subsequent headers and in
1860 * consequence we cannot find the termination.
1861 */
1862 GJ_DEBUG(0,
1863 "Error while reading record header from %s.",
1864 cp->provider->name);
1865 break;
1866 }
1867
1868 error = g_journal_record_header_decode(buf, &rhdr);
1869 if (error != 0) {
1870 GJ_DEBUG(2, "Not a record header at %jd (error=%d).",
1871 (intmax_t)offset, error);
1872 /*
1873 * This is not a record header.
1874 * If we are lucky, this is next journal header.
1875 */
1876 error = g_journal_header_decode(buf, &jhdr);
1877 if (error != 0) {
1878 GJ_DEBUG(1, "Not a journal header at %jd (error=%d).",
1879 (intmax_t)offset, error);
1880 /*
1881 * Nope, this is not journal header, which
1882 * bascially means that journal is not
1883 * terminated properly.
1884 */
1885 error = ENOENT;
1886 break;
1887 }
1888 /*
1889 * Ok. This is header of _some_ journal. Now we need to
1890 * verify if this is header of the _next_ journal.
1891 */
1892 if (jhdr.jh_journal_id != id) {
1893 GJ_DEBUG(1, "Journal ID mismatch at %jd "
1894 "(0x%08x != 0x%08x).", (intmax_t)offset,
1895 (u_int)jhdr.jh_journal_id, (u_int)id);
1896 error = ENOENT;
1897 break;
1898 }
1899
1900 /* Found termination. */
1901 found++;
1902 GJ_DEBUG(1, "Found termination at %jd (id=0x%08x).",
1903 (intmax_t)offset, (u_int)id);
1904 sc->sc_active.jj_offset = offset;
1905 sc->sc_journal_offset =
1906 offset + cp->provider->sectorsize;
1907 sc->sc_journal_id = id;
1908 id = sc->sc_journal_next_id = jhdr.jh_journal_next_id;
1909
1910 while ((tbp = fbp) != NULL) {
1911 fbp = tbp->bio_next;
1912 GJ_LOGREQ(3, tbp, "Adding request.");
1913 g_journal_insert_bio(&sc->sc_inactive.jj_queue,
1914 tbp, M_WAITOK);
1915 }
1916
1917 /* Skip journal's header. */
1918 offset += cp->provider->sectorsize;
1919 continue;
1920 }
1921
1922 /* Skip record's header. */
1923 offset += cp->provider->sectorsize;
1924
1925 /*
1926 * Add information about every record entry to the inactive
1927 * queue.
1928 */
1929 if (sc->sc_flags & GJF_DEVICE_CHECKSUM)
1930 MD5Init(&ctx);
1931 for (i = 0; i < rhdr.jrh_nentries; i++) {
1932 ent = &rhdr.jrh_entries[i];
1933 GJ_DEBUG(3, "Insert entry: %jd %jd.",
1934 (intmax_t)ent->je_offset, (intmax_t)ent->je_length);
1935 g_journal_insert(&fbp, ent->je_offset,
1936 ent->je_offset + ent->je_length, ent->je_joffset,
1937 NULL, M_WAITOK);
1938 if (sc->sc_flags & GJF_DEVICE_CHECKSUM) {
1939 u_char *buf2;
1940
1941 /*
1942 * TODO: Should use faster function (like
1943 * g_journal_sync_read()).
1944 */
1945 buf2 = g_read_data(cp, offset, ent->je_length,
1946 NULL);
1947 if (buf2 == NULL)
1948 GJ_DEBUG(0, "Cannot read data at %jd.",
1949 (intmax_t)offset);
1950 else {
1951 MD5Update(&ctx, buf2, ent->je_length);
1952 g_free(buf2);
1953 }
1954 }
1955 /* Skip entry's data. */
1956 offset += ent->je_length;
1957 }
1958 if (sc->sc_flags & GJF_DEVICE_CHECKSUM) {
1959 MD5Final(sum, &ctx);
1960 if (bcmp(sum, rhdr.jrh_sum, sizeof(rhdr.jrh_sum)) != 0) {
1961 GJ_DEBUG(0, "MD5 hash mismatch at %jd!",
1962 (intmax_t)offset);
1963 }
1964 }
1965 }
end:
	gj_free(bp->bio_data, cp->provider->sectorsize);
	g_destroy_bio(bp);

	/* Remove bios from unterminated journal. */
	while ((tbp = fbp) != NULL) {
		fbp = tbp->bio_next;
		g_destroy_bio(tbp);
	}

	if (found < 1 && joffset > 0) {
		GJ_DEBUG(0, "Journal on %s is broken/corrupted. Initializing.",
		    sc->sc_name);
		while ((tbp = sc->sc_inactive.jj_queue) != NULL) {
			sc->sc_inactive.jj_queue = tbp->bio_next;
			g_destroy_bio(tbp);
		}
		g_journal_initialize(sc);
		g_journal_mark_as_dirty(sc);
	} else {
		GJ_DEBUG(0, "Journal %s consistent.", sc->sc_name);
		g_journal_copy_start(sc);
	}
}
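
/*
 * A worked example of the scan arithmetic used above (illustrative only):
 * after a record header is decoded at `offset', the scan skips the header
 * sector and then the data of every entry it describes:
 *
 *	offset += cp->provider->sectorsize;
 *	for (i = 0; i < rhdr.jrh_nentries; i++)
 *		offset += rhdr.jrh_entries[i].je_length;
 *
 * With a 512-byte sector and two entries of 16384 bytes each, the next
 * header is thus expected 512 + 2 * 16384 = 33280 bytes further on.
 */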

/*
 * Wait for requests.
 * If we have requests in the current queue, flush them 3 seconds after the
 * last flush. This way we don't wait forever (or for a journal switch)
 * before storing partially filled records in the journal.
 */
static void
g_journal_wait(struct g_journal_softc *sc, time_t last_write)
{
	int error, timeout;

	GJ_DEBUG(3, "%s: enter", __func__);
	if (sc->sc_current_count == 0) {
		if (g_journal_debug < 2)
			msleep(sc, &sc->sc_mtx, PRIBIO | PDROP, "gj:work", 0);
		else {
			/*
			 * If debugging is enabled, periodically report the
			 * number of elements in the various queues.
			 */
			for (;;) {
				error = msleep(sc, &sc->sc_mtx, PRIBIO,
				    "gj:work", hz * 3);
				if (error == 0) {
					mtx_unlock(&sc->sc_mtx);
					break;
				}
				GJ_DEBUG(3, "Report: current count=%d",
				    sc->sc_current_count);
				GJ_DEBUG(3, "Report: flush count=%d",
				    sc->sc_flush_count);
				GJ_DEBUG(3, "Report: flush in progress=%d",
				    sc->sc_flush_in_progress);
				GJ_DEBUG(3, "Report: copy in progress=%d",
				    sc->sc_copy_in_progress);
				GJ_DEBUG(3, "Report: delayed=%d",
				    sc->sc_delayed_count);
			}
		}
		GJ_DEBUG(3, "%s: exit 1", __func__);
		return;
	}

	/*
	 * Flush records every 3 seconds, even if they are not full.
	 */
	timeout = (last_write + 3 - time_second) * hz;
	if (timeout <= 0) {
		mtx_unlock(&sc->sc_mtx);
		g_journal_flush(sc);
		g_journal_flush_send(sc);
		GJ_DEBUG(3, "%s: exit 2", __func__);
		return;
	}
	error = msleep(sc, &sc->sc_mtx, PRIBIO | PDROP, "gj:work", timeout);
	if (error == EWOULDBLOCK)
		g_journal_flush_send(sc);
	GJ_DEBUG(3, "%s: exit 3", __func__);
}
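
/*
 * Worked example for the timeout computation above (illustrative only):
 * with hz = 1000, if the last write happened one second ago then
 * last_write + 3 - time_second == 2 and we sleep for at most 2000 ticks;
 * if the last write is already 3 or more seconds old the timeout is <= 0
 * and the records are flushed immediately.
 */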

/*
 * Worker thread.
 */
static void
g_journal_worker(void *arg)
{
	struct g_journal_softc *sc;
	struct g_geom *gp;
	struct g_provider *pp;
	struct bio *bp;
	time_t last_write;
	int type;

	thread_lock(curthread);
	sched_prio(curthread, PRIBIO);
	thread_unlock(curthread);

	sc = arg;
	type = 0;	/* gcc */

	if (sc->sc_flags & GJF_DEVICE_CLEAN) {
		GJ_DEBUG(0, "Journal %s clean.", sc->sc_name);
		g_journal_initialize(sc);
	} else {
		g_journal_sync(sc);
	}
	/*
	 * Check if we can use BIO_FLUSH.
	 */
	sc->sc_bio_flush = 0;
	if (g_io_flush(sc->sc_jconsumer) == 0) {
		sc->sc_bio_flush |= GJ_FLUSH_JOURNAL;
		GJ_DEBUG(1, "BIO_FLUSH supported by %s.",
		    sc->sc_jconsumer->provider->name);
	} else {
		GJ_DEBUG(0, "BIO_FLUSH not supported by %s.",
		    sc->sc_jconsumer->provider->name);
	}
	if (sc->sc_jconsumer != sc->sc_dconsumer) {
		if (g_io_flush(sc->sc_dconsumer) == 0) {
			sc->sc_bio_flush |= GJ_FLUSH_DATA;
			GJ_DEBUG(1, "BIO_FLUSH supported by %s.",
			    sc->sc_dconsumer->provider->name);
		} else {
			GJ_DEBUG(0, "BIO_FLUSH not supported by %s.",
			    sc->sc_dconsumer->provider->name);
		}
	}

	gp = sc->sc_geom;
	g_topology_lock();
	pp = g_new_providerf(gp, "%s.journal", sc->sc_name);
	pp->mediasize = sc->sc_mediasize;
	/*
	 * There could be a problem if the data provider and the journal
	 * provider had different sector sizes, but such a scenario is
	 * prevented at journal creation time.
	 */
	pp->sectorsize = sc->sc_sectorsize;
	g_error_provider(pp, 0);
	g_topology_unlock();
	last_write = time_second;

	if (sc->sc_rootmount != NULL) {
		GJ_DEBUG(1, "root_mount_rel %p", sc->sc_rootmount);
		root_mount_rel(sc->sc_rootmount);
		sc->sc_rootmount = NULL;
	}

2119
2120 for (;;) {
2121 /* Get first request from the queue. */
2122 mtx_lock(&sc->sc_mtx);
2123 bp = bioq_first(&sc->sc_back_queue);
2124 if (bp != NULL)
2125 type = (bp->bio_cflags & GJ_BIO_MASK);
2126 if (bp == NULL) {
2127 bp = bioq_first(&sc->sc_regular_queue);
2128 if (bp != NULL)
2129 type = GJ_BIO_REGULAR;
2130 }
2131 if (bp == NULL) {
2132 try_switch:
2133 if ((sc->sc_flags & GJF_DEVICE_SWITCH) ||
2134 (sc->sc_flags & GJF_DEVICE_DESTROY)) {
2135 if (sc->sc_current_count > 0) {
2136 mtx_unlock(&sc->sc_mtx);
2137 g_journal_flush(sc);
2138 g_journal_flush_send(sc);
2139 continue;
2140 }
2141 if (sc->sc_flush_in_progress > 0)
2142 goto sleep;
2143 if (sc->sc_copy_in_progress > 0)
2144 goto sleep;
2145 }
2146 if (sc->sc_flags & GJF_DEVICE_SWITCH) {
2147 mtx_unlock(&sc->sc_mtx);
2148 g_journal_switch(sc);
2149 wakeup(&sc->sc_journal_copying);
2150 continue;
2151 }
2152 if (sc->sc_flags & GJF_DEVICE_DESTROY) {
2153 GJ_DEBUG(1, "Shutting down worker "
2154 "thread for %s.", gp->name);
2155 sc->sc_worker = NULL;
2156 wakeup(&sc->sc_worker);
2157 mtx_unlock(&sc->sc_mtx);
2158 kproc_exit(0);
2159 }
2160 sleep:
2161 g_journal_wait(sc, last_write);
2162 continue;
2163 }
2164 /*
2165 * If we're in switch process, we need to delay all new
2166 * write requests until its done.
2167 */
2168 if ((sc->sc_flags & GJF_DEVICE_SWITCH) &&
2169 type == GJ_BIO_REGULAR && bp->bio_cmd == BIO_WRITE) {
2170 GJ_LOGREQ(2, bp, "WRITE on SWITCH");
2171 goto try_switch;
2172 }
2173 if (type == GJ_BIO_REGULAR)
2174 bioq_remove(&sc->sc_regular_queue, bp);
2175 else
2176 bioq_remove(&sc->sc_back_queue, bp);
2177 mtx_unlock(&sc->sc_mtx);
2178 switch (type) {
2179 case GJ_BIO_REGULAR:
2180 /* Regular request. */
2181 switch (bp->bio_cmd) {
2182 case BIO_READ:
2183 g_journal_read(sc, bp, bp->bio_offset,
2184 bp->bio_offset + bp->bio_length);
2185 break;
2186 case BIO_WRITE:
2187 last_write = time_second;
2188 g_journal_add_request(sc, bp);
2189 g_journal_flush_send(sc);
2190 break;
2191 default:
2192 panic("Invalid bio_cmd (%d).", bp->bio_cmd);
2193 }
2194 break;
2195 case GJ_BIO_COPY:
2196 switch (bp->bio_cmd) {
2197 case BIO_READ:
2198 if (g_journal_copy_read_done(bp))
2199 g_journal_copy_send(sc);
2200 break;
2201 case BIO_WRITE:
2202 g_journal_copy_write_done(bp);
2203 g_journal_copy_send(sc);
2204 break;
2205 default:
2206 panic("Invalid bio_cmd (%d).", bp->bio_cmd);
2207 }
2208 break;
2209 case GJ_BIO_JOURNAL:
2210 g_journal_flush_done(bp);
2211 g_journal_flush_send(sc);
2212 break;
2213 case GJ_BIO_READ:
2214 default:
2215 panic("Invalid bio (%d).", type);
2216 }
2217 }
2218 }

static void
g_journal_destroy_event(void *arg, int flags __unused)
{
	struct g_journal_softc *sc;

	g_topology_assert();
	sc = arg;
	g_journal_destroy(sc);
}

static void
g_journal_timeout(void *arg)
{
	struct g_journal_softc *sc;

	sc = arg;
	GJ_DEBUG(0, "Timeout. Journal %s cannot be completed.",
	    sc->sc_geom->name);
	g_post_event(g_journal_destroy_event, sc, M_NOWAIT, NULL);
}
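
/*
 * Note: g_journal_timeout() runs from callout context, where the sleepable
 * topology lock must not be acquired, so the destruction is deferred via
 * g_post_event(9) to the GEOM event thread, which calls
 * g_journal_destroy_event() with the topology lock held.
 */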

static struct g_geom *
g_journal_create(struct g_class *mp, struct g_provider *pp,
    const struct g_journal_metadata *md)
{
	struct g_journal_softc *sc;
	struct g_geom *gp;
	struct g_consumer *cp;
	int error;

	sc = NULL;	/* gcc */

	g_topology_assert();
	/*
	 * There are two possibilities:
	 * 1. Data and both journals are on the same provider.
	 * 2. Data and journals are all on separate providers.
	 */
	/* Look for journal device with the same ID. */
	LIST_FOREACH(gp, &mp->geom, geom) {
		sc = gp->softc;
		if (sc == NULL)
			continue;
		if (sc->sc_id == md->md_id)
			break;
	}
	if (gp == NULL)
		sc = NULL;
	else if (sc != NULL && (sc->sc_type & md->md_type) != 0) {
		GJ_DEBUG(1, "Journal device %u already configured.", sc->sc_id);
		return (NULL);
	}
	if (md->md_type == 0 || (md->md_type & ~GJ_TYPE_COMPLETE) != 0) {
		GJ_DEBUG(0, "Invalid type on %s.", pp->name);
		return (NULL);
	}
	if (md->md_type & GJ_TYPE_DATA) {
		GJ_DEBUG(0, "Journal %u: %s contains data.", md->md_id,
		    pp->name);
	}
	if (md->md_type & GJ_TYPE_JOURNAL) {
		GJ_DEBUG(0, "Journal %u: %s contains journal.", md->md_id,
		    pp->name);
	}

	if (sc == NULL) {
		/* Action geom. */
		sc = malloc(sizeof(*sc), M_JOURNAL, M_WAITOK | M_ZERO);
		sc->sc_id = md->md_id;
		sc->sc_type = 0;
		sc->sc_flags = 0;
		sc->sc_worker = NULL;

		gp = g_new_geomf(mp, "gjournal %u", sc->sc_id);
		gp->start = g_journal_start;
		gp->orphan = g_journal_orphan;
		gp->access = g_journal_access;
		gp->softc = sc;
		gp->flags |= G_GEOM_VOLATILE_BIO;
		sc->sc_geom = gp;

		mtx_init(&sc->sc_mtx, "gjournal", NULL, MTX_DEF);

		bioq_init(&sc->sc_back_queue);
		bioq_init(&sc->sc_regular_queue);
		bioq_init(&sc->sc_delayed_queue);
		sc->sc_delayed_count = 0;
		sc->sc_current_queue = NULL;
		sc->sc_current_count = 0;
		sc->sc_flush_queue = NULL;
		sc->sc_flush_count = 0;
		sc->sc_flush_in_progress = 0;
		sc->sc_copy_queue = NULL;
		sc->sc_copy_in_progress = 0;
		sc->sc_inactive.jj_queue = NULL;
		sc->sc_active.jj_queue = NULL;

		sc->sc_rootmount = root_mount_hold("GJOURNAL");
		GJ_DEBUG(1, "root_mount_hold %p", sc->sc_rootmount);

		callout_init(&sc->sc_callout, CALLOUT_MPSAFE);
		if (md->md_type != GJ_TYPE_COMPLETE) {
			/*
			 * The journal and the data are on separate providers.
			 * At this point we have only one of them.
			 * Set up a timeout in case the other part never
			 * appears, so we won't wait forever.
			 */
			callout_reset(&sc->sc_callout, 5 * hz,
			    g_journal_timeout, sc);
		}
	}

	/* Remember type of the data provider. */
	if (md->md_type & GJ_TYPE_DATA)
		sc->sc_orig_type = md->md_type;
	sc->sc_type |= md->md_type;
	cp = NULL;

	if (md->md_type & GJ_TYPE_DATA) {
		if (md->md_flags & GJ_FLAG_CLEAN)
			sc->sc_flags |= GJF_DEVICE_CLEAN;
		if (md->md_flags & GJ_FLAG_CHECKSUM)
			sc->sc_flags |= GJF_DEVICE_CHECKSUM;
		cp = g_new_consumer(gp);
		error = g_attach(cp, pp);
		KASSERT(error == 0, ("Cannot attach to %s (error=%d).",
		    pp->name, error));
		error = g_access(cp, 1, 1, 1);
		if (error != 0) {
			GJ_DEBUG(0, "Cannot access %s (error=%d).", pp->name,
			    error);
			g_journal_destroy(sc);
			return (NULL);
		}
		sc->sc_dconsumer = cp;
		sc->sc_mediasize = pp->mediasize - pp->sectorsize;
		sc->sc_sectorsize = pp->sectorsize;
		sc->sc_jstart = md->md_jstart;
		sc->sc_jend = md->md_jend;
		if (md->md_provider[0] != '\0')
			sc->sc_flags |= GJF_DEVICE_HARDCODED;
		sc->sc_journal_offset = md->md_joffset;
		sc->sc_journal_id = md->md_jid;
		sc->sc_journal_previous_id = md->md_jid;
	}
	if (md->md_type & GJ_TYPE_JOURNAL) {
		if (cp == NULL) {
			cp = g_new_consumer(gp);
			error = g_attach(cp, pp);
			KASSERT(error == 0, ("Cannot attach to %s (error=%d).",
			    pp->name, error));
			error = g_access(cp, 1, 1, 1);
			if (error != 0) {
				GJ_DEBUG(0, "Cannot access %s (error=%d).",
				    pp->name, error);
				g_journal_destroy(sc);
				return (NULL);
			}
		} else {
			/*
			 * The journal is on the same provider as the data,
			 * which means the data provider ends where the
			 * journal starts.
			 */
			sc->sc_mediasize = md->md_jstart;
		}
		sc->sc_jconsumer = cp;
	}

	if ((sc->sc_type & GJ_TYPE_COMPLETE) != GJ_TYPE_COMPLETE) {
		/* Journal is not complete yet. */
		return (gp);
	} else {
		/* Journal complete, cancel timeout. */
		callout_drain(&sc->sc_callout);
	}

	error = kproc_create(g_journal_worker, sc, &sc->sc_worker, 0, 0,
	    "g_journal %s", sc->sc_name);
	if (error != 0) {
		GJ_DEBUG(0, "Cannot create worker thread for %s.journal.",
		    sc->sc_name);
		g_journal_destroy(sc);
		return (NULL);
	}

	return (gp);
}
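
/*
 * A brief, informal summary of the assembly flow implemented above (not
 * additional code): when data and journal share one provider, a single
 * taste delivers GJ_TYPE_COMPLETE and the worker thread is created right
 * away; when they are split, the first taste creates the geom with only
 * one type bit set and arms the 5 second timeout, and a later taste of
 * the other provider supplies the missing bit, drains the callout and
 * creates the worker.
 */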

static void
g_journal_destroy_consumer(void *arg, int flags __unused)
{
	struct g_consumer *cp;

	g_topology_assert();
	cp = arg;
	g_detach(cp);
	g_destroy_consumer(cp);
}

static int
g_journal_destroy(struct g_journal_softc *sc)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_consumer *cp;

	g_topology_assert();

	if (sc == NULL)
		return (ENXIO);

	gp = sc->sc_geom;
	pp = LIST_FIRST(&gp->provider);
	if (pp != NULL) {
		if (pp->acr != 0 || pp->acw != 0 || pp->ace != 0) {
			GJ_DEBUG(1, "Device %s is still open (r%dw%de%d).",
			    pp->name, pp->acr, pp->acw, pp->ace);
			return (EBUSY);
		}
		g_error_provider(pp, ENXIO);

		g_journal_flush(sc);
		g_journal_flush_send(sc);
		g_journal_switch(sc);
	}

	sc->sc_flags |= (GJF_DEVICE_DESTROY | GJF_DEVICE_CLEAN);

	g_topology_unlock();

	if (sc->sc_rootmount != NULL) {
		GJ_DEBUG(1, "root_mount_rel %p", sc->sc_rootmount);
		root_mount_rel(sc->sc_rootmount);
		sc->sc_rootmount = NULL;
	}

	callout_drain(&sc->sc_callout);
	mtx_lock(&sc->sc_mtx);
	wakeup(sc);
	while (sc->sc_worker != NULL)
		msleep(&sc->sc_worker, &sc->sc_mtx, PRIBIO, "gj:destroy", 0);
	mtx_unlock(&sc->sc_mtx);

	if (pp != NULL) {
		GJ_DEBUG(1, "Marking %s as clean.", sc->sc_name);
		g_journal_metadata_update(sc);
		g_topology_lock();
		pp->flags |= G_PF_WITHER;
		g_orphan_provider(pp, ENXIO);
	} else {
		g_topology_lock();
	}
	mtx_destroy(&sc->sc_mtx);

	if (sc->sc_current_count != 0) {
		GJ_DEBUG(0, "Warning! Number of current requests %d.",
		    sc->sc_current_count);
	}

	LIST_FOREACH(cp, &gp->consumer, consumer) {
		if (cp->acr + cp->acw + cp->ace > 0)
			g_access(cp, -1, -1, -1);
		/*
		 * We keep all consumers open for writing, so if we detached
		 * and destroyed the consumer here, the provider would be
		 * offered for tasting again and the journal would be started
		 * over. Sending an event here prevents this from happening.
		 */
		g_post_event(g_journal_destroy_consumer, cp, M_WAITOK, NULL);
	}
	gp->softc = NULL;
	g_wither_geom(gp, ENXIO);
	free(sc, M_JOURNAL);
	return (0);
}

static void
g_journal_taste_orphan(struct g_consumer *cp)
{

	KASSERT(1 == 0, ("%s called while tasting %s.", __func__,
	    cp->provider->name));
}

static struct g_geom *
g_journal_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_journal_metadata md;
	struct g_consumer *cp;
	struct g_geom *gp;
	int error;

	g_topology_assert();
	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
	GJ_DEBUG(2, "Tasting %s.", pp->name);
	if (pp->geom->class == mp)
		return (NULL);

	gp = g_new_geomf(mp, "journal:taste");
	/* This orphan function should never be called. */
	gp->orphan = g_journal_taste_orphan;
	cp = g_new_consumer(gp);
	g_attach(cp, pp);
	error = g_journal_metadata_read(cp, &md);
	g_detach(cp);
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	if (error != 0)
		return (NULL);
	gp = NULL;

	if (md.md_provider[0] != '\0' &&
	    !g_compare_names(md.md_provider, pp->name))
		return (NULL);
	if (md.md_provsize != 0 && md.md_provsize != pp->mediasize)
		return (NULL);
	if (g_journal_debug >= 2)
		journal_metadata_dump(&md);

	gp = g_journal_create(mp, pp, &md);
	return (gp);
}

static struct g_journal_softc *
g_journal_find_device(struct g_class *mp, const char *name)
{
	struct g_journal_softc *sc;
	struct g_geom *gp;
	struct g_provider *pp;

	if (strncmp(name, "/dev/", 5) == 0)
		name += 5;
	LIST_FOREACH(gp, &mp->geom, geom) {
		sc = gp->softc;
		if (sc == NULL)
			continue;
		if (sc->sc_flags & GJF_DEVICE_DESTROY)
			continue;
		if ((sc->sc_type & GJ_TYPE_COMPLETE) != GJ_TYPE_COMPLETE)
			continue;
		pp = LIST_FIRST(&gp->provider);
		if (strcmp(sc->sc_name, name) == 0)
			return (sc);
		if (pp != NULL && strcmp(pp->name, name) == 0)
			return (sc);
	}
	return (NULL);
}

static void
g_journal_ctl_destroy(struct gctl_req *req, struct g_class *mp)
{
	struct g_journal_softc *sc;
	const char *name;
	char param[16];
	int *nargs;
	int error, i;

	g_topology_assert();

	nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
	if (nargs == NULL) {
		gctl_error(req, "No '%s' argument.", "nargs");
		return;
	}
	if (*nargs <= 0) {
		gctl_error(req, "Missing device(s).");
		return;
	}

	for (i = 0; i < *nargs; i++) {
		snprintf(param, sizeof(param), "arg%d", i);
		name = gctl_get_asciiparam(req, param);
		if (name == NULL) {
			gctl_error(req, "No 'arg%d' argument.", i);
			return;
		}
		sc = g_journal_find_device(mp, name);
		if (sc == NULL) {
			gctl_error(req, "No such device: %s.", name);
			return;
		}
		error = g_journal_destroy(sc);
		if (error != 0) {
			gctl_error(req, "Cannot destroy device %s (error=%d).",
			    LIST_FIRST(&sc->sc_geom->provider)->name, error);
			return;
		}
	}
}

static void
g_journal_ctl_sync(struct gctl_req *req __unused, struct g_class *mp __unused)
{

	g_topology_assert();
	g_topology_unlock();
	g_journal_sync_requested++;
	wakeup(&g_journal_switcher_state);
	while (g_journal_sync_requested > 0)
		tsleep(&g_journal_sync_requested, PRIBIO, "j:sreq", hz / 2);
	g_topology_lock();
}
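
/*
 * A sketch of the sync handshake above (under the behavior visible in this
 * file): the requester increments g_journal_sync_requested and wakes the
 * switcher thread, which sleeps on g_journal_switcher_state; once
 * g_journal_do_switch() completes, the switcher resets
 * g_journal_sync_requested to 0 and wakes sleepers on it, which terminates
 * the polling loop in g_journal_ctl_sync().
 */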

static void
g_journal_config(struct gctl_req *req, struct g_class *mp, const char *verb)
{
	uint32_t *version;

	g_topology_assert();

	version = gctl_get_paraml(req, "version", sizeof(*version));
	if (version == NULL) {
		gctl_error(req, "No '%s' argument.", "version");
		return;
	}
	if (*version != G_JOURNAL_VERSION) {
		gctl_error(req, "Userland and kernel parts are out of sync.");
		return;
	}

	if (strcmp(verb, "destroy") == 0 || strcmp(verb, "stop") == 0) {
		g_journal_ctl_destroy(req, mp);
		return;
	} else if (strcmp(verb, "sync") == 0) {
		g_journal_ctl_sync(req, mp);
		return;
	}

	gctl_error(req, "Unknown verb.");
}

static void
g_journal_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	struct g_journal_softc *sc;

	g_topology_assert();

	sc = gp->softc;
	if (sc == NULL)
		return;
	if (pp != NULL) {
		/* Nothing here. */
	} else if (cp != NULL) {
		int first = 1;

		sbuf_printf(sb, "%s<Role>", indent);
		if (cp == sc->sc_dconsumer) {
			sbuf_printf(sb, "Data");
			first = 0;
		}
		if (cp == sc->sc_jconsumer) {
			if (!first)
				sbuf_printf(sb, ",");
			sbuf_printf(sb, "Journal");
		}
		sbuf_printf(sb, "</Role>\n");
		if (cp == sc->sc_jconsumer) {
			sbuf_printf(sb, "<Jstart>%jd</Jstart>\n",
			    (intmax_t)sc->sc_jstart);
			sbuf_printf(sb, "<Jend>%jd</Jend>\n",
			    (intmax_t)sc->sc_jend);
		}
	} else {
		sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)sc->sc_id);
	}
}

static eventhandler_tag g_journal_event_shutdown = NULL;
static eventhandler_tag g_journal_event_lowmem = NULL;

static void
g_journal_shutdown(void *arg, int howto __unused)
{
	struct g_class *mp;
	struct g_geom *gp, *gp2;

	if (panicstr != NULL)
		return;
	mp = arg;
	DROP_GIANT();
	g_topology_lock();
	LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
		if (gp->softc == NULL)
			continue;
		GJ_DEBUG(0, "Shutting down geom %s.", gp->name);
		g_journal_destroy(gp->softc);
	}
	g_topology_unlock();
	PICKUP_GIANT();
}

/*
 * Free cached requests from the inactive queue when memory is low.
 * At most GJ_FREE_AT_ONCE elements are freed in one pass.
 */
#define	GJ_FREE_AT_ONCE	4
static void
g_journal_lowmem(void *arg, int howto __unused)
{
	struct g_journal_softc *sc;
	struct g_class *mp;
	struct g_geom *gp;
	struct bio *bp;
	u_int nfree = GJ_FREE_AT_ONCE;

	g_journal_stats_low_mem++;
	mp = arg;
	DROP_GIANT();
	g_topology_lock();
	LIST_FOREACH(gp, &mp->geom, geom) {
		sc = gp->softc;
		if (sc == NULL || (sc->sc_flags & GJF_DEVICE_DESTROY))
			continue;
		mtx_lock(&sc->sc_mtx);
		for (bp = sc->sc_inactive.jj_queue; nfree > 0 && bp != NULL;
		    nfree--, bp = bp->bio_next) {
			/*
			 * It is safe to free the bio_data here, because:
			 * 1. If bio_data is NULL it will be read from the
			 *    inactive journal.
			 * 2. If bp is sent down, it is first removed from the
			 *    inactive queue, so it's impossible to free the
			 *    data from under an in-flight bio.
			 * On the other hand, freeing elements from the active
			 * queue is not safe.
			 */
			if (bp->bio_data != NULL) {
				GJ_DEBUG(2, "Freeing data from %s.",
				    sc->sc_name);
				gj_free(bp->bio_data, bp->bio_length);
				bp->bio_data = NULL;
			}
		}
		mtx_unlock(&sc->sc_mtx);
		if (nfree == 0)
			break;
	}
	g_topology_unlock();
	PICKUP_GIANT();
}

static void g_journal_switcher(void *arg);

static void
g_journal_init(struct g_class *mp)
{
	int error;

	/* Pick a conservative value if the provided value is unreasonable. */
	if (g_journal_cache_divisor <= 0 ||
	    (vm_kmem_size / g_journal_cache_divisor == 0)) {
		g_journal_cache_divisor = 5;
	}
	if (g_journal_cache_limit > 0) {
		g_journal_cache_limit = vm_kmem_size / g_journal_cache_divisor;
		g_journal_cache_low =
		    (g_journal_cache_limit / 100) * g_journal_cache_switch;
	}
	g_journal_event_shutdown = EVENTHANDLER_REGISTER(shutdown_post_sync,
	    g_journal_shutdown, mp, EVENTHANDLER_PRI_FIRST);
	if (g_journal_event_shutdown == NULL)
		GJ_DEBUG(0, "Warning! Cannot register shutdown event.");
	g_journal_event_lowmem = EVENTHANDLER_REGISTER(vm_lowmem,
	    g_journal_lowmem, mp, EVENTHANDLER_PRI_FIRST);
	if (g_journal_event_lowmem == NULL)
		GJ_DEBUG(0, "Warning! Cannot register lowmem event.");
	error = kproc_create(g_journal_switcher, mp, NULL, 0, 0,
	    "g_journal switcher");
	KASSERT(error == 0, ("Cannot create switcher thread."));
}

static void
g_journal_fini(struct g_class *mp)
{

	if (g_journal_event_shutdown != NULL) {
		EVENTHANDLER_DEREGISTER(shutdown_post_sync,
		    g_journal_event_shutdown);
	}
	if (g_journal_event_lowmem != NULL)
		EVENTHANDLER_DEREGISTER(vm_lowmem, g_journal_event_lowmem);
	g_journal_switcher_state = GJ_SWITCHER_DIE;
	wakeup(&g_journal_switcher_state);
	while (g_journal_switcher_state != GJ_SWITCHER_DIED)
		tsleep(&g_journal_switcher_state, PRIBIO, "jfini:wait", hz / 5);
	GJ_DEBUG(1, "Switcher died.");
}

DECLARE_GEOM_CLASS(g_journal_class, g_journal);

static const struct g_journal_desc *
g_journal_find_desc(const char *fstype)
{
	const struct g_journal_desc *desc;
	int i;

	for (i = 0; (desc = g_journal_filesystems[i]) != NULL; i++) {
		if (strcmp(desc->jd_fstype, fstype) == 0)
			break;
	}
	return (desc);
}
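
/*
 * Example use (illustrative only), taken from g_journal_do_switch() below:
 *
 *	desc = g_journal_find_desc(mp->mnt_stat.f_fstypename);
 *	if (desc == NULL)
 *		continue;
 *
 * File systems without a registered descriptor (g_journal_filesystems[]
 * currently contains only g_journal_ufs) are simply skipped.
 */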

static void
g_journal_switch_wait(struct g_journal_softc *sc)
{
	struct bintime bt;

	mtx_assert(&sc->sc_mtx, MA_OWNED);
	if (g_journal_debug >= 2) {
		if (sc->sc_flush_in_progress > 0) {
			GJ_DEBUG(2, "%d requests flushing.",
			    sc->sc_flush_in_progress);
		}
		if (sc->sc_copy_in_progress > 0) {
			GJ_DEBUG(2, "%d requests copying.",
			    sc->sc_copy_in_progress);
		}
		if (sc->sc_flush_count > 0) {
			GJ_DEBUG(2, "%d requests to flush.",
			    sc->sc_flush_count);
		}
		if (sc->sc_delayed_count > 0) {
			GJ_DEBUG(2, "%d requests delayed.",
			    sc->sc_delayed_count);
		}
	}
	g_journal_stats_switches++;
	if (sc->sc_copy_in_progress > 0)
		g_journal_stats_wait_for_copy++;
	GJ_TIMER_START(1, &bt);
	sc->sc_flags &= ~GJF_DEVICE_BEFORE_SWITCH;
	sc->sc_flags |= GJF_DEVICE_SWITCH;
	wakeup(sc);
	while (sc->sc_flags & GJF_DEVICE_SWITCH) {
		msleep(&sc->sc_journal_copying, &sc->sc_mtx, PRIBIO,
		    "gj:switch", 0);
	}
	GJ_TIMER_STOP(1, &bt, "Switch time of %s", sc->sc_name);
}
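
/*
 * A sketch of the handshake above (matching g_journal_worker()): setting
 * GJF_DEVICE_SWITCH and calling wakeup(sc) wakes the worker sleeping in
 * g_journal_wait(); the worker then calls g_journal_switch(), which is
 * expected to clear the flag, and wakes us via
 * wakeup(&sc->sc_journal_copying), ending the msleep() loop above.
 */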

static void
g_journal_do_switch(struct g_class *classp)
{
	struct g_journal_softc *sc;
	const struct g_journal_desc *desc;
	struct g_geom *gp;
	struct mount *mp;
	struct bintime bt;
	char *mountpoint;
	int error, save;

	DROP_GIANT();
	g_topology_lock();
	LIST_FOREACH(gp, &classp->geom, geom) {
		sc = gp->softc;
		if (sc == NULL)
			continue;
		if (sc->sc_flags & GJF_DEVICE_DESTROY)
			continue;
		if ((sc->sc_type & GJ_TYPE_COMPLETE) != GJ_TYPE_COMPLETE)
			continue;
		mtx_lock(&sc->sc_mtx);
		sc->sc_flags |= GJF_DEVICE_BEFORE_SWITCH;
		mtx_unlock(&sc->sc_mtx);
	}
	g_topology_unlock();
	PICKUP_GIANT();

	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_gjprovider == NULL)
			continue;
		if (mp->mnt_flag & MNT_RDONLY)
			continue;
		desc = g_journal_find_desc(mp->mnt_stat.f_fstypename);
		if (desc == NULL)
			continue;
		if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK))
			continue;
		/* mtx_unlock(&mountlist_mtx) was done inside vfs_busy() */

		DROP_GIANT();
		g_topology_lock();
		sc = g_journal_find_device(classp, mp->mnt_gjprovider);
		g_topology_unlock();
		PICKUP_GIANT();

		if (sc == NULL) {
			GJ_DEBUG(0, "Cannot find journal geom for %s.",
			    mp->mnt_gjprovider);
			goto next;
		} else if (JEMPTY(sc)) {
			mtx_lock(&sc->sc_mtx);
			sc->sc_flags &= ~GJF_DEVICE_BEFORE_SWITCH;
			mtx_unlock(&sc->sc_mtx);
			GJ_DEBUG(3, "No need for %s switch.", sc->sc_name);
			goto next;
		}

		mountpoint = mp->mnt_stat.f_mntonname;

		error = vn_start_write(NULL, &mp, V_WAIT);
		if (error != 0) {
			GJ_DEBUG(0, "vn_start_write(%s) failed (error=%d).",
			    mountpoint, error);
			goto next;
		}

		save = curthread_pflags_set(TDP_SYNCIO);

		GJ_TIMER_START(1, &bt);
		vfs_msync(mp, MNT_NOWAIT);
		GJ_TIMER_STOP(1, &bt, "Msync time of %s", mountpoint);

		GJ_TIMER_START(1, &bt);
		error = VFS_SYNC(mp, MNT_NOWAIT);
		if (error == 0)
			GJ_TIMER_STOP(1, &bt, "Sync time of %s", mountpoint);
		else {
			GJ_DEBUG(0, "Cannot sync file system %s (error=%d).",
			    mountpoint, error);
		}

		curthread_pflags_restore(save);

		vn_finished_write(mp);

		if (error != 0)
			goto next;

		/*
		 * Send BIO_FLUSH before freezing the file system, so the
		 * flush performed after the freeze completes faster.
		 */
		GJ_TIMER_START(1, &bt);
		g_journal_flush_cache(sc);
		GJ_TIMER_STOP(1, &bt, "BIO_FLUSH time of %s", sc->sc_name);

		GJ_TIMER_START(1, &bt);
		error = vfs_write_suspend(mp, VS_SKIP_UNMOUNT);
		GJ_TIMER_STOP(1, &bt, "Suspend time of %s", mountpoint);
		if (error != 0) {
			GJ_DEBUG(0, "Cannot suspend file system %s (error=%d).",
			    mountpoint, error);
			goto next;
		}

		error = desc->jd_clean(mp);
		if (error != 0)
			goto next;

		mtx_lock(&sc->sc_mtx);
		g_journal_switch_wait(sc);
		mtx_unlock(&sc->sc_mtx);

		vfs_write_resume(mp, 0);
next:
		mtx_lock(&mountlist_mtx);
		vfs_unbusy(mp);
	}
	mtx_unlock(&mountlist_mtx);

	sc = NULL;
	for (;;) {
		DROP_GIANT();
		g_topology_lock();
		LIST_FOREACH(gp, &g_journal_class.geom, geom) {
			sc = gp->softc;
			if (sc == NULL)
				continue;
			mtx_lock(&sc->sc_mtx);
			if ((sc->sc_type & GJ_TYPE_COMPLETE) == GJ_TYPE_COMPLETE &&
			    !(sc->sc_flags & GJF_DEVICE_DESTROY) &&
			    (sc->sc_flags & GJF_DEVICE_BEFORE_SWITCH)) {
				break;
			}
			mtx_unlock(&sc->sc_mtx);
			sc = NULL;
		}
		g_topology_unlock();
		PICKUP_GIANT();
		if (sc == NULL)
			break;
		mtx_assert(&sc->sc_mtx, MA_OWNED);
		g_journal_switch_wait(sc);
		mtx_unlock(&sc->sc_mtx);
	}
}

/*
 * TODO: The switcher thread should be started on first geom creation and
 * killed on last geom destruction.
 */
static void
g_journal_switcher(void *arg)
{
	struct g_class *mp;
	struct bintime bt;
	int error;

	mp = arg;
	curthread->td_pflags |= TDP_NORUNNINGBUF;
	for (;;) {
		g_journal_switcher_wokenup = 0;
		error = tsleep(&g_journal_switcher_state, PRIBIO, "jsw:wait",
		    g_journal_switch_time * hz);
		if (g_journal_switcher_state == GJ_SWITCHER_DIE) {
			g_journal_switcher_state = GJ_SWITCHER_DIED;
			GJ_DEBUG(1, "Switcher exiting.");
			wakeup(&g_journal_switcher_state);
			kproc_exit(0);
		}
		if (error == 0 && g_journal_sync_requested == 0) {
			GJ_DEBUG(1, "Out of cache, force switch (used=%u "
			    "limit=%u).", g_journal_cache_used,
			    g_journal_cache_limit);
		}
		GJ_TIMER_START(1, &bt);
		g_journal_do_switch(mp);
		GJ_TIMER_STOP(1, &bt, "Entire switch time");
		if (g_journal_sync_requested > 0) {
			g_journal_sync_requested = 0;
			wakeup(&g_journal_sync_requested);
		}
	}
}