/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_flusher.c,v 1.45 2008/07/31 04:42:04 dillon Exp $
 */
/*
 * HAMMER dependency flusher thread
 *
 * Meta-data updates create buffer dependencies which are arranged as a
 * hierarchy of lists.
 */

#include "hammer.h"

static void hammer_flusher_master_thread(void *arg);
static void hammer_flusher_slave_thread(void *arg);
static int hammer_flusher_flush(hammer_mount_t hmp, int *nomorep);
static int hammer_flusher_flush_inode(hammer_inode_t ip, void *data);

RB_GENERATE(hammer_fls_rb_tree, hammer_inode, rb_flsnode,
	    hammer_ino_rb_compare);

/*
 * Support structures for the flusher threads.
 */
struct hammer_flusher_info {
	TAILQ_ENTRY(hammer_flusher_info) entry;
	struct hammer_mount *hmp;
	thread_t td;
	int runstate;
	int count;
	hammer_flush_group_t flg;
	struct hammer_transaction trans;	/* per-slave transaction */
};

typedef struct hammer_flusher_info *hammer_flusher_info_t;

/*
 * Sync all inodes pending on the flusher.
 *
 * All flush groups will be flushed.  This does not queue dirty inodes
 * to the flush groups, it just flushes out what has already been queued!
 */
void
hammer_flusher_sync(hammer_mount_t hmp)
{
	int seq;

	seq = hammer_flusher_async(hmp, NULL);
	hammer_flusher_wait(hmp, seq);
}
/*
 * Sync all flush groups through to close_flg - return immediately.
 * If close_flg is NULL all flush groups are synced.
 *
 * Returns the sequence number of the last closed flush group,
 * which may be close_flg.  When syncing to the end, if there
 * are no flush groups pending we still cycle the flusher and
 * must allocate a sequence number to placemark the spot even
 * though no flush group will ever be associated with it.
 */
int
hammer_flusher_async(hammer_mount_t hmp, hammer_flush_group_t close_flg)
{
	hammer_flush_group_t flg;
	int seq;

	/*
	 * Already closed
	 */
	if (close_flg && close_flg->closed)
		return(close_flg->seq);

	/*
	 * Close flush groups until we hit the end of the list
	 * or close_flg.
	 */
	while ((flg = hmp->next_flush_group) != NULL) {
		KKASSERT(flg->closed == 0 && flg->running == 0);
		flg->closed = 1;
		hmp->next_flush_group = TAILQ_NEXT(flg, flush_entry);
		if (flg == close_flg)
			break;
	}

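	/*
	 * Wake the flusher up.  The wakeup is only issued on the 0->1
	 * transition of the signal count because the master thread only
	 * sleeps while the count is zero and resets it when it runs.
	 */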
	if (hmp->flusher.td) {
		if (hmp->flusher.signal++ == 0)
			wakeup(&hmp->flusher.signal);
		if (flg) {
			seq = flg->seq;
		} else {
			seq = hmp->flusher.next;
			++hmp->flusher.next;
		}
	} else {
		seq = hmp->flusher.done;
	}
	return(seq);
}

/*
 * Flush the current/next flushable flg.  This function is typically called
 * in a loop along with hammer_flusher_wait(hmp, returned_seq) to iterate
 * flush groups until specific conditions are met.
 *
 * If a flush is currently in progress its seq is returned.
 *
 * If no flush is currently in progress the next available flush group
 * will be flushed and its seq returned.
 *
 * If no flush groups are present a dummy seq will be allocated and
 * returned and the flusher will be activated (e.g. to flush the
 * undo/redo and the volume header).
 */
int
hammer_flusher_async_one(hammer_mount_t hmp)
{
	hammer_flush_group_t flg;
	int seq;

	if (hmp->flusher.td) {
		flg = TAILQ_FIRST(&hmp->flush_group_list);
		seq = hammer_flusher_async(hmp, flg);
	} else {
		seq = hmp->flusher.done;
	}
	return(seq);
}

/*
 * Wait for the flusher to finish flushing the specified sequence
 * number.  The flush is already running and will signal us on
 * each completion.
 */
void
hammer_flusher_wait(hammer_mount_t hmp, int seq)
{
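	/*
	 * NOTE: The comparison uses a signed delta so sequence number
	 *	 wrap-around is handled correctly.
	 */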
	while ((int)(seq - hmp->flusher.done) > 0)
		tsleep(&hmp->flusher.done, 0, "hmrfls", 0);
}

/*
 * Returns non-zero if the flusher is currently running.  Used for
 * time-domain multiplexing of frontend operations in order to avoid
 * starving the backend flusher.
 */
int
hammer_flusher_running(hammer_mount_t hmp)
{
	int seq = hmp->flusher.next - 1;
	if ((int)(seq - hmp->flusher.done) > 0)
		return(1);
	return (0);
}

void
hammer_flusher_wait_next(hammer_mount_t hmp)
{
	int seq;

	seq = hammer_flusher_async_one(hmp);
	hammer_flusher_wait(hmp, seq);
}

void
hammer_flusher_create(hammer_mount_t hmp)
{
	hammer_flusher_info_t info;
	int i;

	hmp->flusher.signal = 0;
	hmp->flusher.done = 0;
	hmp->flusher.next = 1;
	hammer_ref(&hmp->flusher.finalize_lock);
	TAILQ_INIT(&hmp->flusher.run_list);
	TAILQ_INIT(&hmp->flusher.ready_list);

	lwkt_create(hammer_flusher_master_thread, hmp,
		    &hmp->flusher.td, NULL, 0, -1, "hammer-M");
	for (i = 0; i < HAMMER_MAX_FLUSHERS; ++i) {
		info = kmalloc(sizeof(*info), hmp->m_misc, M_WAITOK|M_ZERO);
		info->hmp = hmp;
		TAILQ_INSERT_TAIL(&hmp->flusher.ready_list, info, entry);
		lwkt_create(hammer_flusher_slave_thread, info,
			    &info->td, NULL, 0, -1, "hammer-S%d", i);
	}
}

void
hammer_flusher_destroy(hammer_mount_t hmp)
{
	hammer_flusher_info_t info;

	/*
	 * Kill the master
	 */
	hmp->flusher.exiting = 1;
	while (hmp->flusher.td) {
		++hmp->flusher.signal;
		wakeup(&hmp->flusher.signal);
		tsleep(&hmp->flusher.exiting, 0, "hmrwex", hz);
	}

	/*
	 * Kill the slaves
	 */
	while ((info = TAILQ_FIRST(&hmp->flusher.ready_list)) != NULL) {
		KKASSERT(info->runstate == 0);
		TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
		info->runstate = -1;
		wakeup(&info->runstate);
		while (info->td)
			tsleep(&info->td, 0, "hmrwwc", 0);
		kfree(info, hmp->m_misc);
	}
}

/*
 * The master flusher thread manages the flusher sequence id and
 * synchronization with the slave work threads.
 */
static void
hammer_flusher_master_thread(void *arg)
{
	hammer_mount_t hmp;
	int seq;
	int nomore;

	hmp = arg;

	lwkt_gettoken(&hmp->fs_token);

	for (;;) {
		/*
		 * Flush all sequence numbers up to but not including .next,
		 * or until an open flush group is encountered.
		 */
		for (;;) {
			while (hmp->flusher.group_lock)
				tsleep(&hmp->flusher.group_lock, 0, "hmrhld", 0);
			hammer_flusher_clean_loose_ios(hmp);

			seq = hammer_flusher_flush(hmp, &nomore);
			hmp->flusher.done = seq;
			wakeup(&hmp->flusher.done);

			if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
				break;
			if (nomore)
				break;
		}

		/*
		 * Wait for activity.
		 */
		if (hmp->flusher.exiting && TAILQ_EMPTY(&hmp->flush_group_list))
			break;
		while (hmp->flusher.signal == 0)
			tsleep(&hmp->flusher.signal, 0, "hmrwwa", 0);
		hmp->flusher.signal = 0;
	}

	/*
	 * And we are done.
	 */
	hmp->flusher.td = NULL;
	wakeup(&hmp->flusher.exiting);
	lwkt_reltoken(&hmp->fs_token);
	lwkt_exit();
}

/*
 * Flush the next sequence number until an open flush group is encountered
 * or we reach (next).  Not all sequence numbers will have flush groups
 * associated with them.  These require that the UNDO/REDO FIFO still be
 * flushed since it can take at least one additional run to synchronize
 * the FIFO, and more to also synchronize the reserve structures.
 */
static int
hammer_flusher_flush(hammer_mount_t hmp, int *nomorep)
{
	hammer_flusher_info_t info;
	hammer_flush_group_t flg;
	hammer_reserve_t resv;
	int count;
	int seq;

	/*
	 * Just in case there's a flush race on mount.  The seq number
	 * does not change.
	 */
	if (TAILQ_FIRST(&hmp->flusher.ready_list) == NULL) {
		*nomorep = 1;
		return (hmp->flusher.done);
	}
	*nomorep = 0;

	/*
	 * Flush the next sequence number.  Sequence numbers can exist
	 * without an assigned flush group, indicating that just a FIFO flush
	 * should occur.
	 */
	seq = hmp->flusher.done + 1;
	flg = TAILQ_FIRST(&hmp->flush_group_list);
	if (flg == NULL) {
		if (seq == hmp->flusher.next) {
			*nomorep = 1;
			return (hmp->flusher.done);
		}
	} else if (seq == flg->seq) {
		if (flg->closed) {
			KKASSERT(flg->running == 0);
			flg->running = 1;
			if (hmp->fill_flush_group == flg) {
				hmp->fill_flush_group =
					TAILQ_NEXT(flg, flush_entry);
			}
		} else {
			*nomorep = 1;
			return (hmp->flusher.done);
		}
	} else {
		KKASSERT((int)(flg->seq - seq) > 0);
		flg = NULL;
	}

	/*
	 * We only do one flg but we may have to loop/retry.
	 *
	 * Due to various races it is possible to come across a flush
	 * group which has not yet been closed.
	 */
	count = 0;
	while (flg && flg->running) {
		++count;
		if (hammer_debug_general & 0x0001) {
			kprintf("hammer_flush %d ttl=%d recs=%d\n",
				flg->seq, flg->total_count, flg->refs);
		}
		if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
			break;
		hammer_start_transaction_fls(&hmp->flusher.trans, hmp);

		/*
		 * If the previous flush cycle just about exhausted our
		 * UNDO space we may have to do a dummy cycle to move the
		 * first_offset up before actually digging into a new cycle,
		 * or the new cycle will not have sufficient undo space.
		 */
		if (hammer_flusher_undo_exhausted(&hmp->flusher.trans, 3))
			hammer_flusher_finalize(&hmp->flusher.trans, 0);

		KKASSERT(hmp->next_flush_group != flg);

		/*
		 * Place the flg in the flusher structure and start the
		 * slaves running.  The slaves will compete for inodes
		 * to flush.
		 *
		 * Make a per-thread copy of the transaction.
		 */
		while ((info = TAILQ_FIRST(&hmp->flusher.ready_list)) != NULL) {
			TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
			info->flg = flg;
			info->runstate = 1;
			info->trans = hmp->flusher.trans;
			TAILQ_INSERT_TAIL(&hmp->flusher.run_list, info, entry);
			wakeup(&info->runstate);
		}

		/*
		 * Wait for all slaves to finish running
		 */
		while (TAILQ_FIRST(&hmp->flusher.run_list) != NULL)
			tsleep(&hmp->flusher.ready_list, 0, "hmrfcc", 0);

		/*
		 * Do the final finalization, clean up
		 */
		hammer_flusher_finalize(&hmp->flusher.trans, 1);
		hmp->flusher.tid = hmp->flusher.trans.tid;

		hammer_done_transaction(&hmp->flusher.trans);

		/*
		 * Loop up on the same flg.  If the flg is done clean it up
		 * and break out.  We only flush one flg.
		 */
		if (RB_EMPTY(&flg->flush_tree)) {
			KKASSERT(flg->refs == 0);
			TAILQ_REMOVE(&hmp->flush_group_list, flg, flush_entry);
			kfree(flg, hmp->m_misc);
			break;
		}
		KKASSERT(TAILQ_FIRST(&hmp->flush_group_list) == flg);
	}

	/*
	 * We may have pure meta-data to flush, or we may have to finish
	 * cycling the UNDO FIFO, even if there were no flush groups.
	 */
	if (count == 0 && hammer_flusher_haswork(hmp)) {
		hammer_start_transaction_fls(&hmp->flusher.trans, hmp);
		hammer_flusher_finalize(&hmp->flusher.trans, 1);
		hammer_done_transaction(&hmp->flusher.trans);
	}

	/*
	 * Clean up any freed big-blocks (typically zone-2).
	 * resv->flush_group is typically set several flush groups ahead
	 * of the free to ensure that the freed block is not reused until
	 * earlier flush cycles that could still reference it have
	 * completed.
	 */
	while ((resv = TAILQ_FIRST(&hmp->delay_list)) != NULL) {
		if ((int)(resv->flush_group - seq) > 0)
			break;
		hammer_reserve_clrdelay(hmp, resv);
	}
	return (seq);
}


/*
 * The slave flusher thread pulls work off the master flush list until no
 * work is left.
 */
static void
hammer_flusher_slave_thread(void *arg)
{
	hammer_flush_group_t flg;
	hammer_flusher_info_t info;
	hammer_mount_t hmp;

	info = arg;
	hmp = info->hmp;
	lwkt_gettoken(&hmp->fs_token);

	for (;;) {
		while (info->runstate == 0)
			tsleep(&info->runstate, 0, "hmrssw", 0);
		if (info->runstate < 0)
			break;
		flg = info->flg;

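		/*
		 * Flush the inodes in this flush group.  Multiple slaves
		 * scan the same group concurrently; the SLAVEFLUSH
		 * interlock in hammer_flusher_flush_inode() keeps an
		 * inode from being flushed twice.
		 */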
		RB_SCAN(hammer_fls_rb_tree, &flg->flush_tree, NULL,
			hammer_flusher_flush_inode, info);

		info->count = 0;
		info->runstate = 0;
		info->flg = NULL;
		TAILQ_REMOVE(&hmp->flusher.run_list, info, entry);
		TAILQ_INSERT_TAIL(&hmp->flusher.ready_list, info, entry);
		wakeup(&hmp->flusher.ready_list);
	}
	info->td = NULL;
	wakeup(&info->td);
	lwkt_reltoken(&hmp->fs_token);
	lwkt_exit();
}

void
hammer_flusher_clean_loose_ios(hammer_mount_t hmp)
{
	hammer_buffer_t buffer;
	hammer_io_t io;

	/*
	 * loose ends - buffers without bp's aren't tracked by the kernel
	 * and can build up, so clean them out.  This can occur when an
	 * IO completes on a buffer with no references left.
	 *
	 * The io_token is needed to protect the list.
	 */
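	/*
	 * The root is tested once without the token to avoid acquiring
	 * it in the common empty case, then re-checked while the token
	 * is held.
	 */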
	if ((io = RB_ROOT(&hmp->lose_root)) != NULL) {
		lwkt_gettoken(&hmp->io_token);
		while ((io = RB_ROOT(&hmp->lose_root)) != NULL) {
			KKASSERT(io->mod_root == &hmp->lose_root);
			RB_REMOVE(hammer_mod_rb_tree, io->mod_root, io);
			io->mod_root = NULL;
			hammer_ref(&io->lock);
			buffer = (void *)io;
			hammer_rel_buffer(buffer, 0);
		}
		lwkt_reltoken(&hmp->io_token);
	}
}

/*
 * Flush a single inode that is part of a flush group.
 *
 * Flusher errors are extremely serious, even ENOSPC shouldn't occur because
 * the front-end should have reserved sufficient space on the media.  Any
 * error other than EWOULDBLOCK will force the mount to be read-only.
 */
static
int
hammer_flusher_flush_inode(hammer_inode_t ip, void *data)
{
	hammer_flusher_info_t info = data;
	hammer_mount_t hmp = info->hmp;
	hammer_transaction_t trans = &info->trans;
	int error;

	/*
	 * Several slaves are operating on the same flush group concurrently.
	 * The SLAVEFLUSH flag prevents them from tripping over each other.
	 *
	 * NOTE: It is possible for an EWOULDBLOCK'd ip returned by one slave
	 *	 to be resynced by another, but normally such inodes are not
	 *	 revisited until the master loop gets to them.
	 */
	if (ip->flags & HAMMER_INODE_SLAVEFLUSH)
		return(0);
	ip->flags |= HAMMER_INODE_SLAVEFLUSH;
	++hammer_stats_inode_flushes;

	hammer_flusher_clean_loose_ios(hmp);
	vm_wait_nominal();
	error = hammer_sync_inode(trans, ip);

	/*
	 * EWOULDBLOCK can happen under normal operation; all other errors
	 * are considered extremely serious.  We must set WOULDBLOCK
	 * mechanics to deal with the mess left over from the abort of the
	 * previous flush.
	 */
	if (error) {
		ip->flags |= HAMMER_INODE_WOULDBLOCK;
		if (error == EWOULDBLOCK)
			error = 0;
	}
	hammer_flush_inode_done(ip, error);
	/* ip invalid */

	while (hmp->flusher.finalize_want)
		tsleep(&hmp->flusher.finalize_want, 0, "hmrsxx", 0);
	if (hammer_flusher_undo_exhausted(trans, 1)) {
		kprintf("HAMMER: Warning: UNDO area too small!\n");
		hammer_flusher_finalize(trans, 1);
	} else if (hammer_flusher_meta_limit(trans->hmp)) {
		hammer_flusher_finalize(trans, 0);
	}
	return (0);
}

/*
 * Return non-zero if the UNDO area has less than (quarter / 4) of its
 * space left.
 *
 * 1/4 - Emergency free undo space level.  Below this point the flusher
 *	 will finalize even if directory dependencies have not been resolved.
 *
 * 2/4 - Used by the pruning and reblocking code.  These functions may be
 *	 running in parallel with a flush and cannot be allowed to drop
 *	 available undo space to emergency levels.
 *
 * 3/4 - Used at the beginning of a flush to force-sync the volume header
 *	 to give the flush plenty of runway to work in.
 */
int
hammer_flusher_undo_exhausted(hammer_transaction_t trans, int quarter)
{
	if (hammer_undo_space(trans) <
	    hammer_undo_max(trans->hmp) * quarter / 4) {
		return(1);
	} else {
		return(0);
	}
}

/*
 * Flush all pending UNDOs, wait for write completion, update the volume
 * header with the new UNDO end position, and flush it.  Then
 * asynchronously flush the meta-data.
 *
 * If this is the last finalization in a flush group we also synchronize
 * our cached blockmap and set hmp->flusher_undo_start and our cached undo
 * fifo first_offset so the next flush resets the FIFO pointers.
 *
 * If this is not final it is being called because too many dirty meta-data
 * buffers have built up and must be flushed with UNDO synchronization to
 * avoid a buffer cache deadlock.
 */
void
hammer_flusher_finalize(hammer_transaction_t trans, int final)
{
	hammer_volume_t root_volume;
	hammer_blockmap_t cundomap, dundomap;
	hammer_mount_t hmp;
	hammer_io_t io;
	hammer_off_t save_undo_next_offset;
	int count;
	int i;

	hmp = trans->hmp;
	root_volume = trans->rootvol;

	/*
	 * Exclusively lock the flusher.  This guarantees that all dirty
	 * buffers will be idled (have a mod-count of 0).
	 */
	++hmp->flusher.finalize_want;
	hammer_lock_ex(&hmp->flusher.finalize_lock);

	/*
	 * If this isn't the final sync several threads may have hit the
	 * meta-limit at the same time and raced.  Only sync if we really
	 * have to, after acquiring the lock.
	 */
	if (final == 0 && !hammer_flusher_meta_limit(hmp))
		goto done;

	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
		goto done;

	/*
	 * Flush data buffers.  This can occur asynchronously and at any
	 * time.  We must interlock against the frontend direct-data write
	 * but do not have to acquire the sync-lock yet.
	 *
	 * These data buffers have already been collected prior to the
	 * related inode(s) getting queued to the flush group.
	 */
	count = 0;
	while ((io = RB_FIRST(hammer_mod_rb_tree, &hmp->data_root)) != NULL) {
		if (io->ioerror)
			break;
		hammer_ref(&io->lock);
		hammer_io_write_interlock(io);
		KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
		hammer_io_flush(io, 0);
		hammer_io_done_interlock(io);
		hammer_rel_buffer((hammer_buffer_t)io, 0);
		hammer_io_limit_backlog(hmp);
		++count;
	}

	/*
	 * The sync-lock is required for the remaining sequence.  This lock
	 * prevents meta-data from being modified.
	 */
	hammer_sync_lock_ex(trans);

	/*
	 * If we have been asked to finalize the volume header sync the
	 * cached blockmap to the on-disk blockmap.  Generate an UNDO
	 * record for the update.
	 */
	if (final) {
		cundomap = &hmp->blockmap[0];
		dundomap = &root_volume->ondisk->vol0_blockmap[0];
		if (root_volume->io.modified) {
			hammer_modify_volume(trans, root_volume,
					     dundomap, sizeof(hmp->blockmap));
			for (i = 0; i < HAMMER_MAX_ZONES; ++i)
				hammer_crc_set_blockmap(&cundomap[i]);
			bcopy(cundomap, dundomap, sizeof(hmp->blockmap));
			hammer_modify_volume_done(root_volume);
		}
	}

	/*
	 * Flush UNDOs.  This can occur concurrently with the data flush
	 * because data writes never overwrite.
	 *
	 * This also waits for I/Os to complete and flushes the cache on
	 * the target disk.
	 *
	 * Record the UNDO append point as this can continue to change
	 * after we have flushed the UNDOs.
	 */
	cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
	hammer_lock_ex(&hmp->undo_lock);
	save_undo_next_offset = cundomap->next_offset;
	hammer_unlock(&hmp->undo_lock);
	hammer_flusher_flush_undos(hmp, HAMMER_FLUSH_UNDOS_FORCED);

	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
		goto failed;

	/*
	 * HAMMER VERSION < 4:
	 *	Update the on-disk volume header with the new UNDO FIFO end
	 *	position (do not generate new UNDO records for this change).
	 *	We have to do this for the UNDO FIFO whether (final) is
	 *	set or not in order for the UNDOs to be recognized on
	 *	recovery.
	 *
	 * HAMMER VERSION >= 4:
	 *	The UNDO FIFO data written above will be recognized on
	 *	recovery without us having to sync the volume header.
	 *
	 * Also update the on-disk next_tid field.  This does not require
	 * an UNDO.  However, because our TID is generated before we get
	 * the sync lock, another sync may have beat us to the punch.
	 *
	 * This also has the side effect of updating first_offset based on
	 * a prior finalization when the first finalization of the next flush
	 * cycle occurs, removing any undo info from the prior finalization
	 * from consideration.
	 *
	 * The volume header will be flushed out synchronously.
	 */
	dundomap = &root_volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];
	cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];

	if (dundomap->first_offset != cundomap->first_offset ||
	    dundomap->next_offset != save_undo_next_offset) {
		hammer_modify_volume(NULL, root_volume, NULL, 0);
		dundomap->first_offset = cundomap->first_offset;
		dundomap->next_offset = save_undo_next_offset;
		hammer_crc_set_blockmap(dundomap);
		hammer_modify_volume_done(root_volume);
	}

	/*
	 * vol0_next_tid is used for TID selection and is updated without
	 * an UNDO so we do not reuse a TID that may have been rolled-back.
	 *
	 * vol0_last_tid is the highest fully-synchronized TID.  It is
	 * set-up when the UNDO fifo is fully synced, later on (not here).
	 *
	 * The root volume can be open for modification by other threads
	 * generating UNDO or REDO records (for example reblocking, pruning,
	 * or REDO mode fast-fsyncs), so the write interlock is mandatory.
	 */
	if (root_volume->io.modified) {
		hammer_modify_volume(NULL, root_volume, NULL, 0);
		if (root_volume->ondisk->vol0_next_tid < trans->tid)
			root_volume->ondisk->vol0_next_tid = trans->tid;
		hammer_crc_set_volume(root_volume->ondisk);
		hammer_modify_volume_done(root_volume);
		hammer_io_write_interlock(&root_volume->io);
		hammer_io_flush(&root_volume->io, 0);
		hammer_io_done_interlock(&root_volume->io);
	}

	/*
	 * Wait for I/Os to complete.
	 *
	 * For HAMMER VERSION 4+ filesystems we do not have to wait for
	 * the I/O to complete as the new UNDO FIFO entries are recognized
	 * even without the volume header update.  This allows the volume
	 * header to be flushed along with meta-data, significantly reducing
	 * flush overheads.
	 */
	hammer_flusher_clean_loose_ios(hmp);
	if (hmp->version < HAMMER_VOL_VERSION_FOUR)
		hammer_io_wait_all(hmp, "hmrfl3", 1);

	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
		goto failed;

	/*
	 * Flush meta-data.  The meta-data will be undone if we crash
	 * so we can safely flush it asynchronously.  There is no need
	 * to wait for I/O to complete (or issue a synchronous disk flush).
	 *
	 * In fact, even if we did wait, the meta-data will still be undone
	 * by a crash up until the next flush cycle due to the first_offset
	 * in the volume header for the UNDO FIFO not being adjusted until
	 * the following flush cycle.
	 *
	 * No io interlock is needed, bioops callbacks will not mess with
	 * meta-data buffers.
	 */
	count = 0;
	while ((io = RB_FIRST(hammer_mod_rb_tree, &hmp->meta_root)) != NULL) {
		if (io->ioerror)
			break;
		KKASSERT(io->modify_refs == 0);
		hammer_ref(&io->lock);
		KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
		hammer_io_flush(io, 0);
		hammer_rel_buffer((hammer_buffer_t)io, 0);
		hammer_io_limit_backlog(hmp);
		++count;
	}

	/*
	 * If this is the final finalization for the flush group set
	 * up for the next sequence by setting a new first_offset in
	 * our cached blockmap and clearing the undo history.
	 *
	 * Even though we have updated our cached first_offset, the on-disk
	 * first_offset still governs available-undo-space calculations.
	 *
	 * We synchronize to save_undo_next_offset rather than
	 * cundomap->next_offset because that is what we flushed out
	 * above.
	 *
	 * NOTE! UNDOs can only be added with the sync_lock held
	 *	 so we can clear the undo history without racing.
	 *	 REDOs can be added at any time which is why we
	 *	 have to be careful and use save_undo_next_offset
	 *	 when setting the new first_offset.
	 */
	if (final) {
		cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
		if (cundomap->first_offset != save_undo_next_offset) {
			cundomap->first_offset = save_undo_next_offset;
			hmp->hflags |= HMNT_UNDO_DIRTY;
		} else if (cundomap->first_offset != cundomap->next_offset) {
			hmp->hflags |= HMNT_UNDO_DIRTY;
		} else {
			hmp->hflags &= ~HMNT_UNDO_DIRTY;
		}
		hammer_clear_undo_history(hmp);

		/*
		 * Flush tid sequencing.  flush_tid1 is fully synchronized,
		 * meaning a crash will not roll it back.  flush_tid2 has
		 * been written out asynchronously and a crash will roll
		 * it back.  flush_tid1 is used for all mirroring masters.
		 */
		if (hmp->flush_tid1 != hmp->flush_tid2) {
			hmp->flush_tid1 = hmp->flush_tid2;
			wakeup(&hmp->flush_tid1);
		}
		hmp->flush_tid2 = trans->tid;

		/*
		 * Clear the REDO SYNC flag.  This flag is used to ensure
		 * that the recovery span in the UNDO/REDO FIFO contains
		 * at least one REDO SYNC record.
		 */
		hmp->flags &= ~HAMMER_MOUNT_REDO_SYNC;
	}

	/*
	 * Cleanup.  Report any critical errors.
	 */
failed:
	hammer_sync_unlock(trans);

	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) {
		kprintf("HAMMER(%s): Critical write error during flush, "
			"refusing to sync UNDO FIFO\n",
			root_volume->ondisk->vol_name);
	}

done:
	hammer_unlock(&hmp->flusher.finalize_lock);

	if (--hmp->flusher.finalize_want == 0)
		wakeup(&hmp->flusher.finalize_want);
	hammer_stats_commits += final;
}

/*
 * Flush UNDOs.
 */
void
hammer_flusher_flush_undos(hammer_mount_t hmp, int mode)
{
	hammer_io_t io;
	int count;

	count = 0;
	while ((io = RB_FIRST(hammer_mod_rb_tree, &hmp->undo_root)) != NULL) {
		if (io->ioerror)
			break;
		hammer_ref(&io->lock);
		KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
		hammer_io_write_interlock(io);
		hammer_io_flush(io, hammer_undo_reclaim(io));
		hammer_io_done_interlock(io);
		hammer_rel_buffer((hammer_buffer_t)io, 0);
		hammer_io_limit_backlog(hmp);
		++count;
	}
	hammer_flusher_clean_loose_ios(hmp);
	if (mode == HAMMER_FLUSH_UNDOS_FORCED ||
	    (mode == HAMMER_FLUSH_UNDOS_AUTO && count)) {
		hammer_io_wait_all(hmp, "hmrfl1", 1);
	} else {
		hammer_io_wait_all(hmp, "hmrfl2", 0);
	}
}

/*
 * Return non-zero if too many dirty meta-data buffers have built up.
 *
 * Since we cannot allow such buffers to flush until we have dealt with
 * the UNDOs, we risk deadlocking the kernel's buffer cache.
 */
int
hammer_flusher_meta_limit(hammer_mount_t hmp)
{
	if (hmp->locked_dirty_space + hmp->io_running_space >
	    hammer_limit_dirtybufspace) {
		return(1);
	}
	return(0);
}

/*
 * Return non-zero if too many dirty meta-data buffers have built up.
 *
 * This version is used by background operations (mirror, prune, reblock)
 * to leave room for foreground operations.
 */
int
hammer_flusher_meta_halflimit(hammer_mount_t hmp)
{
	if (hmp->locked_dirty_space + hmp->io_running_space >
	    hammer_limit_dirtybufspace / 2) {
		return(1);
	}
	return(0);
}

/*
 * Return non-zero if the flusher still has something to flush.
 */
int
hammer_flusher_haswork(hammer_mount_t hmp)
{
	if (hmp->ronly)
		return(0);
	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
		return(0);
	if (TAILQ_FIRST(&hmp->flush_group_list) ||	/* dirty inodes */
	    RB_ROOT(&hmp->volu_root) ||			/* dirty buffers */
	    RB_ROOT(&hmp->undo_root) ||
	    RB_ROOT(&hmp->data_root) ||
	    RB_ROOT(&hmp->meta_root) ||
	    (hmp->hflags & HMNT_UNDO_DIRTY)		/* UNDO FIFO sync */
	) {
		return(1);
	}
	return(0);
}