/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * This header file contains structures used internally by the HAMMERFS
 * implementation.  See hammer_disk.h for on-disk structures.
 */

#include <sys/param.h>
#include <sys/types.h>
#ifdef _KERNEL
#include <sys/kernel.h>
#endif
#include <sys/conf.h>
#ifdef _KERNEL
#include <sys/systm.h>
#endif
#include <sys/tree.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mountctl.h>
#include <sys/vnode.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/stat.h>
#include <sys/globaldata.h>
#include <sys/lockf.h>
#include <sys/buf.h>
#include <sys/queue.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <vm/vm_extern.h>

#include <sys/buf2.h>
#ifdef _KERNEL
#include <sys/signal2.h>
#include <vm/vm_page2.h>
#endif

#include "hammer_disk.h"
#include "hammer_mount.h"
#include "hammer_ioctl.h"

#if defined(_KERNEL) || defined(_KERNEL_STRUCTURES)

MALLOC_DECLARE(M_HAMMER);

/*
 * Kernel trace
 */
#if !defined(KTR_HAMMER)
#define KTR_HAMMER	KTR_ALL
#endif
/* KTR_INFO_MASTER_EXTERN(hammer); */

/*
 * Misc structures
 */
struct hammer_mount;

/*
 * Key structure used for custom RB tree inode lookups.  This prototypes
 * the function hammer_ino_rb_tree_RB_LOOKUP_INFO(root, info).
 */
typedef struct hammer_inode_info {
	int64_t		obj_id;		/* (key) object identifier */
	hammer_tid_t	obj_asof;	/* (key) snapshot transid or 0 */
	u_int32_t	obj_localization; /* (key) pseudo-fs */
	union {
		struct hammer_btree_leaf_elm *leaf;
	} u;
} *hammer_inode_info_t;
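
/*
 * Illustrative sketch (not part of the API): how a caller might fill in
 * this key to scan every snapshot of an object via
 * hammer_scan_inode_snapshots(), declared later in this header.  The
 * obj_id and localization values are assumptions for the example.
 *
 *	struct hammer_inode_info iinfo;
 *
 *	bzero(&iinfo, sizeof(iinfo));
 *	iinfo.obj_id = obj_id;			(object to look up)
 *	iinfo.obj_asof = 0;			(0 = scan all snapshots)
 *	iinfo.obj_localization = localization;	(pseudo-fs selector)
 *	hammer_scan_inode_snapshots(hmp, &iinfo, callback, data);
 */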

typedef enum hammer_transaction_type {
	HAMMER_TRANS_RO,
	HAMMER_TRANS_STD,
	HAMMER_TRANS_FLS
} hammer_transaction_type_t;

/*
 * HAMMER Transaction tracking
 */
struct hammer_transaction {
	hammer_transaction_type_t type;
	struct hammer_mount *hmp;
	hammer_tid_t	tid;
	u_int64_t	time;
	u_int32_t	time32;
	int		sync_lock_refs;
	int		flags;
	struct hammer_volume *rootvol;
};

typedef struct hammer_transaction *hammer_transaction_t;

#define HAMMER_TRANSF_NEWINODE	0x0001
#define HAMMER_TRANSF_DIDIO	0x0002
#define HAMMER_TRANSF_CRCDOM	0x0004	/* EDOM on CRC error, less critical */
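
/*
 * Illustrative sketch (assumed usage, based on the transaction functions
 * declared later in this header): a typical frontend operation brackets
 * its work with a transaction.
 *
 *	struct hammer_transaction trans;
 *
 *	hammer_start_transaction(&trans, hmp);
 *	... modify in-memory inode/record state ...
 *	hammer_done_transaction(&trans);
 */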

/*
 * HAMMER locks
 */
struct hammer_lock {
	volatile u_int	refs;		/* active references */
	volatile u_int	lockval;	/* lock count and control bits */
	struct thread	*lowner;	/* owner if exclusively held */
	struct thread	*rowner;	/* owner if shared held (debugging) */
};

#define HAMMER_REFS_LOCKED	0x40000000	/* transition check */
#define HAMMER_REFS_WANTED	0x20000000	/* transition check */
#define HAMMER_REFS_CHECK	0x10000000	/* transition check */

#define HAMMER_REFS_FLAGS	(HAMMER_REFS_LOCKED | \
				 HAMMER_REFS_WANTED | \
				 HAMMER_REFS_CHECK)

#define HAMMER_LOCKF_EXCLUSIVE	0x40000000
#define HAMMER_LOCKF_WANTED	0x20000000

static __inline int
hammer_notlocked(struct hammer_lock *lock)
{
	return(lock->lockval == 0);
}

static __inline int
hammer_islocked(struct hammer_lock *lock)
{
	return(lock->lockval != 0);
}

/*
 * Returns the number of refs on the object.
 */
static __inline int
hammer_isactive(struct hammer_lock *lock)
{
	return(lock->refs & ~HAMMER_REFS_FLAGS);
}

static __inline int
hammer_oneref(struct hammer_lock *lock)
{
	return((lock->refs & ~HAMMER_REFS_FLAGS) == 1);
}

static __inline int
hammer_norefs(struct hammer_lock *lock)
{
	return((lock->refs & ~HAMMER_REFS_FLAGS) == 0);
}

static __inline int
hammer_norefsorlock(struct hammer_lock *lock)
{
	return(lock->refs == 0);
}

static __inline int
hammer_refsorlock(struct hammer_lock *lock)
{
	return(lock->refs != 0);
}

/*
 * Return non-zero if we specifically own the lock exclusively.
 */
static __inline int
hammer_lock_excl_owned(struct hammer_lock *lock, thread_t td)
{
	if ((lock->lockval & HAMMER_LOCKF_EXCLUSIVE) &&
	    lock->lowner == td) {
		return(1);
	}
	return(0);
}
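
/*
 * Illustrative sketch (assumed usage): the exclusive-lock helpers
 * declared later in this header pair up in the usual way.  The ident
 * string is only used for debugging/tsleep identification.
 *
 *	hammer_lock_ex_ident(&node->lock, "hmrlck");
 *	... modify the locked structure ...
 *	hammer_unlock(&node->lock);
 *
 * Shared access uses hammer_lock_sh()/hammer_unlock() instead, and
 * hammer_lock_upgrade() attempts a shared-to-exclusive transition.
 */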

/*
 * Flush state, used by various structures
 */
typedef enum hammer_inode_state {
	HAMMER_FST_IDLE,
	HAMMER_FST_SETUP,
	HAMMER_FST_FLUSH
} hammer_inode_state_t;

TAILQ_HEAD(hammer_record_list, hammer_record);

/*
 * Pseudo-filesystem extended data tracking
 */
struct hammer_pfs_rb_tree;
struct hammer_pseudofs_inmem;
RB_HEAD(hammer_pfs_rb_tree, hammer_pseudofs_inmem);
RB_PROTOTYPE2(hammer_pfs_rb_tree, hammer_pseudofs_inmem, rb_node,
	      hammer_pfs_rb_compare, u_int32_t);

struct hammer_pseudofs_inmem {
	RB_ENTRY(hammer_pseudofs_inmem) rb_node;
	struct hammer_lock	lock;
	u_int32_t		localization;
	hammer_tid_t		create_tid;
	int			flags;
	udev_t			fsid_udev;
	struct hammer_pseudofs_data pfsd;
};

typedef struct hammer_pseudofs_inmem *hammer_pseudofs_inmem_t;

#define HAMMER_PFSM_DELETED	0x0001

/*
 * Cache object ids.  A fixed number of objid cache structures are
 * created to reserve object id's for newly created files in multiples
 * of 100,000, localized to a particular directory, and recycled as
 * needed.  This allows parallel create operations in different
 * directories to retain fairly localized object ids which in turn
 * improves reblocking performance and layout.
 */
#define OBJID_CACHE_SIZE	2048
#define OBJID_CACHE_BULK_BITS	10		/* 10 bits (1024) */
#define OBJID_CACHE_BULK	(32 * 32)	/* two level (1024) */
#define OBJID_CACHE_BULK_MASK	(OBJID_CACHE_BULK - 1)
#define OBJID_CACHE_BULK_MASK64	((u_int64_t)(OBJID_CACHE_BULK - 1))

typedef struct hammer_objid_cache {
	TAILQ_ENTRY(hammer_objid_cache) entry;
	struct hammer_inode		*dip;
	hammer_tid_t			base_tid;
	int				count;
	u_int32_t			bm0;
	u_int32_t			bm1[32];
} *hammer_objid_cache_t;
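
/*
 * Illustrative sketch (an assumption about the intended layout, not a
 * quote of the allocator): bm0/bm1 form a two-level bitmap covering
 * OBJID_CACHE_BULK (32 * 32 = 1024) ids.  Bit i of bm0 summarizes the
 * 32-bit word bm1[i], so testing whether id n is in use looks like:
 *
 *	n &= OBJID_CACHE_BULK_MASK;			(0..1023)
 *	in_use = (cache->bm1[n >> 5] >> (n & 31)) & 1;
 *
 * and bm0 lets the allocator skip fully-populated bm1 words quickly.
 */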

/*
 * Associate an inode with a B-Tree node to cache search start positions
 */
typedef struct hammer_node_cache {
	TAILQ_ENTRY(hammer_node_cache) entry;
	struct hammer_node	*node;
	struct hammer_inode	*ip;
} *hammer_node_cache_t;

TAILQ_HEAD(hammer_node_cache_list, hammer_node_cache);
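
/*
 * Illustrative sketch (assumed usage of the cache functions declared
 * later in this header): after a successful B-Tree search the node can
 * be remembered so the next lookup for the same inode starts nearby.
 *
 *	hammer_cache_node(&ip->cache[0], cursor->node);
 *	...
 *	hammer_uncache_node(&ip->cache[0]);	(drop the association)
 */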

/*
 * Live dedup cache
 */
struct hammer_dedup_crc_rb_tree;
RB_HEAD(hammer_dedup_crc_rb_tree, hammer_dedup_cache);
RB_PROTOTYPE2(hammer_dedup_crc_rb_tree, hammer_dedup_cache, crc_entry,
	      hammer_dedup_crc_rb_compare, hammer_crc_t);

struct hammer_dedup_off_rb_tree;
RB_HEAD(hammer_dedup_off_rb_tree, hammer_dedup_cache);
RB_PROTOTYPE2(hammer_dedup_off_rb_tree, hammer_dedup_cache, off_entry,
	      hammer_dedup_off_rb_compare, hammer_off_t);

#define DEDUP_CACHE_SIZE	4096	/* XXX make it a dynamic tunable */

typedef struct hammer_dedup_cache {
	RB_ENTRY(hammer_dedup_cache) crc_entry;
	RB_ENTRY(hammer_dedup_cache) off_entry;
	TAILQ_ENTRY(hammer_dedup_cache) lru_entry;
	struct hammer_mount *hmp;
	int64_t		obj_id;
	u_int32_t	localization;
	off_t		file_offset;
	int		bytes;
	hammer_off_t	data_offset;
	hammer_crc_t	crc;
} *hammer_dedup_cache_t;
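
/*
 * Illustrative sketch (assumed usage of the dedup functions declared
 * later in this header): the write path can probe the cache by CRC and
 * verify the candidate before sharing its on-disk data block.
 *
 *	hammer_dedup_cache_t dcp;
 *
 *	dcp = hammer_dedup_cache_lookup(hmp, crc);
 *	if (dcp && hammer_dedup_validate(dcp, zone, bytes, data)) {
 *		... reference dcp->data_offset instead of allocating ...
 *	}
 */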

/*
 * Structure used to organize flush groups.  Flush groups must be
 * organized into chunks in order to avoid blowing out the UNDO FIFO.
 * Without this a 'sync' could end up flushing 50,000 inodes in a single
 * transaction.
 */
struct hammer_fls_rb_tree;
RB_HEAD(hammer_fls_rb_tree, hammer_inode);
RB_PROTOTYPE(hammer_fls_rb_tree, hammer_inode, rb_flsnode,
	      hammer_ino_rb_compare);

struct hammer_flush_group {
	TAILQ_ENTRY(hammer_flush_group)	flush_entry;
	struct hammer_fls_rb_tree	flush_tree;
	int				seq;		/* our seq no */
	int				total_count;	/* record load */
	int				running;	/* group is running */
	int				closed;
	int				refs;
};

typedef struct hammer_flush_group *hammer_flush_group_t;

TAILQ_HEAD(hammer_flush_group_list, hammer_flush_group);

/*
 * Structure used to represent an inode in-memory.
 *
 * The record and data associated with an inode may be out of sync with
 * the disk (xDIRTY flags), or not even on the disk at all (ONDISK flag
 * clear).
 *
 * An inode may also hold a cache of unsynchronized records, used for
 * database and directories only.  Unsynchronized regular file data is
 * stored in the buffer cache.
 *
 * NOTE: A file which is created and destroyed within the initial
 * synchronization period can wind up not doing any disk I/O at all.
 *
 * Finally, an inode may cache numerous disk-referencing B-Tree cursors.
 */
struct hammer_ino_rb_tree;
struct hammer_inode;
RB_HEAD(hammer_ino_rb_tree, hammer_inode);
RB_PROTOTYPEX(hammer_ino_rb_tree, INFO, hammer_inode, rb_node,
	      hammer_ino_rb_compare, hammer_inode_info_t);

struct hammer_redo_rb_tree;
RB_HEAD(hammer_redo_rb_tree, hammer_inode);
RB_PROTOTYPE2(hammer_redo_rb_tree, hammer_inode, rb_redonode,
	      hammer_redo_rb_compare, hammer_off_t);

struct hammer_rec_rb_tree;
struct hammer_record;
RB_HEAD(hammer_rec_rb_tree, hammer_record);
RB_PROTOTYPEX(hammer_rec_rb_tree, INFO, hammer_record, rb_node,
	      hammer_rec_rb_compare, hammer_btree_leaf_elm_t);

TAILQ_HEAD(hammer_node_list, hammer_node);

struct hammer_inode {
	RB_ENTRY(hammer_inode)	rb_node;
	hammer_inode_state_t	flush_state;
	hammer_flush_group_t	flush_group;
	RB_ENTRY(hammer_inode)	rb_flsnode;	/* when on flush list */
	RB_ENTRY(hammer_inode)	rb_redonode;	/* when INODE_RDIRTY is set */
	struct hammer_record_list target_list;	/* target of dependent recs */
	int64_t			obj_id;		/* (key) object identifier */
	hammer_tid_t		obj_asof;	/* (key) snapshot or 0 */
	u_int32_t		obj_localization; /* (key) pseudo-fs */
	struct hammer_mount	*hmp;
	hammer_objid_cache_t	objid_cache;
	int			flags;
	int			error;		/* flush error */
	int			cursor_ip_refs;	/* sanity */
	int			cursor_exclreq_count;
	int			rsv_recs;
	struct vnode		*vp;
	hammer_pseudofs_inmem_t	pfsm;
	struct lockf		advlock;
	struct hammer_lock	lock;		/* sync copy interlock */
	off_t			trunc_off;
	struct hammer_btree_leaf_elm ino_leaf;	/* in-memory cache */
	struct hammer_inode_data ino_data;	/* in-memory cache */
	struct hammer_rec_rb_tree rec_tree;	/* in-memory cache */
	int			rec_generation;
	struct hammer_node_cache cache[4];	/* search initiate cache */

	/*
	 * When a demarcation is created to synchronize an inode to
	 * disk, certain fields are copied so the front-end VOPs
	 * can continue to run in parallel with the synchronization
	 * occurring in the background.
	 */
	int		sync_flags;		/* to-sync flags cache */
	off_t		sync_trunc_off;		/* to-sync truncation */
	off_t		save_trunc_off;		/* write optimization */
	struct hammer_btree_leaf_elm sync_ino_leaf; /* to-sync cache */
	struct hammer_inode_data sync_ino_data;	/* to-sync cache */
	size_t		redo_count;

	/*
	 * Track the earliest offset in the UNDO/REDO FIFO containing
	 * REDO records.  This is staged to the backend during flush
	 * sequences.  While the inode is staged redo_fifo_next is used
	 * to track the earliest offset for rotation into redo_fifo_start
	 * on completion of the flush.
	 */
	hammer_off_t	redo_fifo_start;
	hammer_off_t	redo_fifo_next;
};

typedef struct hammer_inode *hammer_inode_t;

#define VTOI(vp)	((struct hammer_inode *)(vp)->v_data)
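
/*
 * Illustrative sketch (assumed usage): VOP handlers recover the HAMMER
 * inode from the vnode with VTOI() before doing anything else, e.g.
 *
 *	hammer_inode_t ip = VTOI(ap->a_vp);
 *
 *	if (ip->flags & HAMMER_INODE_RO)
 *		return (EROFS);
 */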

/*
 * NOTE: DDIRTY does not include atime or mtime and does not include
 *	 write-append size changes.  SDIRTY handles write-append size
 *	 changes.
 *
 *	 REDO indicates that REDO logging is active, creating a definitive
 *	 stream of REDO records in the UNDO/REDO log for writes and
 *	 truncations, including boundary records when/if REDO is turned off.
 *	 REDO is typically enabled by fsync() and turned off if excessive
 *	 writes occur without an intervening fsync().
 *
 *	 RDIRTY indicates that REDO records were laid down in the UNDO/REDO
 *	 FIFO (even if REDO is turned off some might still be active) and
 *	 still being tracked for this inode.  See hammer_redo.c
 */
					/* (not including atime/mtime) */
#define HAMMER_INODE_DDIRTY	0x0001	/* in-memory ino_data is dirty */
#define HAMMER_INODE_RSV_INODES	0x0002	/* hmp->rsv_inodes bumped */
#define HAMMER_INODE_CONN_DOWN	0x0004	/* include in downward recursion */
#define HAMMER_INODE_XDIRTY	0x0008	/* in-memory records */
#define HAMMER_INODE_ONDISK	0x0010	/* inode is on-disk (else not yet) */
#define HAMMER_INODE_FLUSH	0x0020	/* flush on last ref */
#define HAMMER_INODE_DELETED	0x0080	/* inode delete (backend) */
#define HAMMER_INODE_DELONDISK	0x0100	/* delete synchronized to disk */
#define HAMMER_INODE_RO		0x0200	/* read-only (because of as-of) */
#define HAMMER_INODE_RECSW	0x0400	/* waiting on data record flush */
#define HAMMER_INODE_DONDISK	0x0800	/* data records may be on disk */
#define HAMMER_INODE_BUFS	0x1000	/* dirty high level bps present */
#define HAMMER_INODE_REFLUSH	0x2000	/* flush on dependency / reflush */
#define HAMMER_INODE_RECLAIM	0x4000	/* trying to reclaim */
#define HAMMER_INODE_FLUSHW	0x8000	/* someone waiting for flush */

#define HAMMER_INODE_TRUNCATED	0x00010000
#define HAMMER_INODE_DELETING	0x00020000 /* inode delete request (frontend)*/
#define HAMMER_INODE_RESIGNAL	0x00040000 /* re-signal on re-flush */
#define HAMMER_INODE_ATIME	0x00100000 /* in-memory atime modified */
#define HAMMER_INODE_MTIME	0x00200000 /* in-memory mtime modified */
#define HAMMER_INODE_WOULDBLOCK 0x00400000 /* re-issue to new flush group */
#define HAMMER_INODE_DUMMY	0x00800000 /* dummy inode covering bad file */
#define HAMMER_INODE_SDIRTY	0x01000000 /* in-memory ino_data.size is dirty*/
#define HAMMER_INODE_REDO	0x02000000 /* REDO logging active */
#define HAMMER_INODE_RDIRTY	0x04000000 /* REDO records active in fifo */
#define HAMMER_INODE_SLAVEFLUSH	0x08000000 /* being flushed by slave */

#define HAMMER_INODE_MODMASK	(HAMMER_INODE_DDIRTY|HAMMER_INODE_SDIRTY|    \
				 HAMMER_INODE_XDIRTY|HAMMER_INODE_BUFS|	     \
				 HAMMER_INODE_ATIME|HAMMER_INODE_MTIME|	     \
				 HAMMER_INODE_TRUNCATED|HAMMER_INODE_DELETING)

#define HAMMER_INODE_MODMASK_NOXDIRTY	\
				(HAMMER_INODE_MODMASK & ~HAMMER_INODE_XDIRTY)

#define HAMMER_INODE_MODMASK_NOREDO	\
				(HAMMER_INODE_DDIRTY|			     \
				 HAMMER_INODE_XDIRTY|			     \
				 HAMMER_INODE_TRUNCATED|HAMMER_INODE_DELETING)
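
/*
 * Illustrative sketch (assumed usage): MODMASK collects every "something
 * must be synchronized" bit, so dirtiness tests reduce to a single mask:
 *
 *	if (ip->flags & HAMMER_INODE_MODMASK)
 *		hammer_flush_inode(ip, 0);
 *
 * The NOXDIRTY variant ignores in-memory records for cases where only
 * the inode/data state matters.
 */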

#define HAMMER_FLUSH_SIGNAL	0x0001
#define HAMMER_FLUSH_RECURSION	0x0002

/*
 * Used by the inode reclaim code to pipeline reclaims and avoid
 * blowing out kernel memory or letting the flusher get too far
 * behind.  The reclaim wakes up when count reaches 0 or the
 * timer expires.
 */
struct hammer_reclaim {
	TAILQ_ENTRY(hammer_reclaim) entry;
	int	count;
};

/*
 * Track who is creating the greatest burden on the
 * inode cache.
 */
struct hammer_inostats {
	pid_t		pid;	/* track user process */
	int		ltick;	/* last tick */
	int		count;	/* count (degenerates) */
};

#define HAMMER_INOSTATS_HSIZE	32
#define HAMMER_INOSTATS_HMASK	(HAMMER_INOSTATS_HSIZE - 1)
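
/*
 * Illustrative sketch (an assumption about how a power-of-2 hash table
 * like this is typically indexed, not a quote of the implementation):
 *
 *	struct hammer_inostats *stats;
 *
 *	stats = &hmp->inostats[pid & HAMMER_INOSTATS_HMASK];
 *
 * HSIZE being a power of 2 makes the mask a cheap modulo.
 */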

/*
 * Structure used to represent an unsynchronized record in-memory.  These
 * records typically represent directory entries.  Only non-historical
 * records are kept in-memory.
 *
 * Records are organized as a per-inode RB-Tree.  If the inode is not
 * on disk then neither are any records and the in-memory record tree
 * represents the entire contents of the inode.  If the inode is on disk
 * then the on-disk B-Tree is scanned in parallel with the in-memory
 * RB-Tree to synthesize the current state of the file.
 *
 * Records are also used to enforce the ordering of directory create/delete
 * operations.  A new inode will not be flushed to disk unless its related
 * directory entry is also being flushed at the same time.  A directory entry
 * will not be removed unless its related inode is also being removed at the
 * same time.
 */
typedef enum hammer_record_type {
	HAMMER_MEM_RECORD_GENERAL,	/* misc record */
	HAMMER_MEM_RECORD_INODE,	/* inode record */
	HAMMER_MEM_RECORD_ADD,		/* positive memory cache record */
	HAMMER_MEM_RECORD_DEL,		/* negative delete-on-disk record */
	HAMMER_MEM_RECORD_DATA		/* bulk-data record w/on-disk ref */
} hammer_record_type_t;

struct hammer_record {
	RB_ENTRY(hammer_record)		rb_node;
	TAILQ_ENTRY(hammer_record)	target_entry;
	hammer_inode_state_t		flush_state;
	hammer_flush_group_t		flush_group;
	hammer_record_type_t		type;
	struct hammer_lock		lock;
	struct hammer_reserve		*resv;
	struct hammer_inode		*ip;
	struct hammer_inode		*target_ip;
	struct hammer_btree_leaf_elm	leaf;
	union hammer_data_ondisk	*data;
	int				flags;
	int				gflags;
	hammer_off_t			zone2_offset;	/* direct-write only */
};

typedef struct hammer_record *hammer_record_t;
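
/*
 * Illustrative sketch (assumed usage of the record functions declared
 * later in this header; exact reference/error semantics live in
 * hammer_object.c): the frontend allocates an in-memory record, fills
 * in the leaf and data, and enters it into the per-inode RB-Tree.
 *
 *	hammer_record_t record;
 *
 *	record = hammer_alloc_mem_record(ip, data_len);
 *	... fill in record->leaf and record->data ...
 *	error = hammer_mem_add(record);
 */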

/*
 * Record flags.  Note that FE can only be set by the frontend if the
 * record has not been interlocked by the backend w/ BE.
 */
#define HAMMER_RECF_ALLOCDATA		0x0001
#define HAMMER_RECF_ONRBTREE		0x0002
#define HAMMER_RECF_DELETED_FE		0x0004	/* deleted (frontend) */
#define HAMMER_RECF_DELETED_BE		0x0008	/* deleted (backend) */
#define HAMMER_RECF_COMMITTED		0x0010	/* committed to the B-Tree */
#define HAMMER_RECF_INTERLOCK_BE	0x0020	/* backend interlock */
#define HAMMER_RECF_WANTED		0x0040	/* wanted by the frontend */
#define HAMMER_RECF_DEDUPED		0x0080	/* will be live-dedup'ed */
#define HAMMER_RECF_CONVERT_DELETE	0x0100	/* special case */
#define HAMMER_RECF_REDO		0x1000	/* REDO was laid down */

/*
 * These flags must be separate to deal with SMP races
 */
#define HAMMER_RECG_DIRECT_IO		0x0001	/* related direct I/O running */
#define HAMMER_RECG_DIRECT_WAIT		0x0002	/* waiting on related direct I/O */
#define HAMMER_RECG_DIRECT_INVAL	0x0004	/* buffer alias invalidation */

/*
 * hammer_create_at_cursor() and hammer_delete_at_cursor() flags.
 */
#define HAMMER_CREATE_MODE_UMIRROR	0x0001
#define HAMMER_CREATE_MODE_SYS		0x0002

#define HAMMER_DELETE_ADJUST		0x0001
#define HAMMER_DELETE_DESTROY		0x0002

/*
 * In-memory structures representing on-disk structures.
 */
struct hammer_volume;
struct hammer_buffer;
struct hammer_node;
struct hammer_undo;
struct hammer_reserve;

RB_HEAD(hammer_vol_rb_tree, hammer_volume);
RB_HEAD(hammer_buf_rb_tree, hammer_buffer);
RB_HEAD(hammer_nod_rb_tree, hammer_node);
RB_HEAD(hammer_und_rb_tree, hammer_undo);
RB_HEAD(hammer_res_rb_tree, hammer_reserve);
RB_HEAD(hammer_mod_rb_tree, hammer_io);

RB_PROTOTYPE2(hammer_vol_rb_tree, hammer_volume, rb_node,
	      hammer_vol_rb_compare, int32_t);
RB_PROTOTYPE2(hammer_buf_rb_tree, hammer_buffer, rb_node,
	      hammer_buf_rb_compare, hammer_off_t);
RB_PROTOTYPE2(hammer_nod_rb_tree, hammer_node, rb_node,
	      hammer_nod_rb_compare, hammer_off_t);
RB_PROTOTYPE2(hammer_und_rb_tree, hammer_undo, rb_node,
	      hammer_und_rb_compare, hammer_off_t);
RB_PROTOTYPE2(hammer_res_rb_tree, hammer_reserve, rb_node,
	      hammer_res_rb_compare, hammer_off_t);
RB_PROTOTYPE2(hammer_mod_rb_tree, hammer_io, rb_node,
	      hammer_mod_rb_compare, hammer_off_t);

/*
 * IO management - embedded at the head of various in-memory structures
 *
 * VOLUME	- hammer_volume containing meta-data
 * META_BUFFER	- hammer_buffer containing meta-data
 * DATA_BUFFER	- hammer_buffer containing pure-data
 *
 * Dirty volume headers and dirty meta-data buffers are locked until the
 * flusher can sequence them out.  Dirty pure-data buffers can be written.
 * Clean buffers can be passively released.
 */
typedef enum hammer_io_type {
	HAMMER_STRUCTURE_VOLUME,
	HAMMER_STRUCTURE_META_BUFFER,
	HAMMER_STRUCTURE_UNDO_BUFFER,
	HAMMER_STRUCTURE_DATA_BUFFER,
	HAMMER_STRUCTURE_DUMMY
} hammer_io_type_t;

union hammer_io_structure;
struct hammer_io;

struct worklist {
	LIST_ENTRY(worklist) node;
};

TAILQ_HEAD(hammer_io_list, hammer_io);
typedef struct hammer_io_list *hammer_io_list_t;

struct hammer_io {
	struct worklist		worklist;
	struct hammer_lock	lock;
	enum hammer_io_type	type;
	struct hammer_mount	*hmp;
	struct hammer_volume	*volume;
	RB_ENTRY(hammer_io)	rb_node;	/* if modified */
	TAILQ_ENTRY(hammer_io)	iorun_entry;	/* iorun_list */
	struct hammer_mod_rb_tree *mod_root;
	struct buf		*bp;
	int64_t			offset;		/* zone-2 offset */
	int			bytes;		/* buffer cache buffer size */
	int			modify_refs;

	/*
	 * These can be modified at any time by the backend while holding
	 * io_token, due to bio_done and hammer_io_complete() callbacks.
	 */
	u_int		running : 1;	/* bp write IO in progress */
	u_int		waiting : 1;	/* someone is waiting on us */
	u_int		ioerror : 1;	/* abort on io-error */
	u_int		unusedA : 29;

	/*
	 * These can only be modified by the frontend while holding
	 * fs_token, or by the backend while holding the io interlocked
	 * with no references (which will block the frontend when it
	 * tries to reference it).
	 *
	 * WARNING! SMP RACES will create havoc if the callbacks ever tried
	 *	    to modify any of these outside the above restrictions.
	 */
	u_int		modified : 1;	/* bp's data was modified */
	u_int		released : 1;	/* bp released (w/ B_LOCKED set) */
	u_int		validated : 1;	/* ondisk has been validated */
	u_int		waitdep : 1;	/* flush waits for dependencies */
	u_int		recovered : 1;	/* has recovery ref */
	u_int		waitmod : 1;	/* waiting for modify_refs */
	u_int		reclaim : 1;	/* reclaim requested */
	u_int		gencrc : 1;	/* crc needs to be generated */
	u_int		unusedB : 24;
};

typedef struct hammer_io *hammer_io_t;

#define HAMMER_CLUSTER_SIZE	(64 * 1024)
#if HAMMER_CLUSTER_SIZE > MAXBSIZE
#undef  HAMMER_CLUSTER_SIZE
#define HAMMER_CLUSTER_SIZE	MAXBSIZE
#endif
#define HAMMER_CLUSTER_BUFS	(HAMMER_CLUSTER_SIZE / HAMMER_BUFSIZE)
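
/*
 * Worked example (assuming HAMMER_BUFSIZE is 16KB, per hammer_disk.h):
 * with a 64KB cluster size HAMMER_CLUSTER_BUFS is 65536 / 16384 = 4
 * buffers per read-ahead cluster.  If MAXBSIZE is smaller than 64KB
 * the cluster size is clamped down to MAXBSIZE first.
 */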

/*
 * In-memory volume representing an on-disk volume.
 */
struct hammer_volume {
	struct hammer_io io;
	RB_ENTRY(hammer_volume) rb_node;
	struct hammer_volume_ondisk *ondisk;
	int32_t	vol_no;
	int64_t nblocks;	/* note: special calculation for statfs */
	int64_t buffer_base;	/* base offset of buffer 0 */
	hammer_off_t maxbuf_off; /* Maximum buffer offset (zone-2) */
	hammer_off_t maxraw_off; /* Maximum raw offset for device */
	char	*vol_name;
	struct vnode *devvp;
	int	vol_flags;
};

typedef struct hammer_volume *hammer_volume_t;

/*
 * In-memory buffer (other than volume, super-cluster, or cluster),
 * representing an on-disk buffer.
 */
struct hammer_buffer {
	struct hammer_io io;
	RB_ENTRY(hammer_buffer) rb_node;
	void *ondisk;
	hammer_off_t zoneX_offset;
	hammer_off_t zone2_offset;
	struct hammer_reserve *resv;
	struct hammer_node_list clist;
};

typedef struct hammer_buffer *hammer_buffer_t;

/*
 * In-memory B-Tree node, representing an on-disk B-Tree node.
 *
 * This is a hang-on structure which is backed by a hammer_buffer,
 * indexed by a hammer_cluster, and used for fine-grained locking of
 * B-Tree nodes in order to properly control lock ordering.  A hammer_buffer
 * can contain multiple nodes representing wildly disassociated portions
 * of the B-Tree so locking cannot be done on a buffer-by-buffer basis.
 *
 * This structure uses a cluster-relative index to reduce the number
 * of layers required to access it, and also because all on-disk B-Tree
 * references are cluster-relative offsets.
 */
struct hammer_node {
	struct hammer_lock	lock;		/* node-by-node lock */
	TAILQ_ENTRY(hammer_node) entry;		/* per-buffer linkage */
	RB_ENTRY(hammer_node)	rb_node;	/* per-cluster linkage */
	hammer_off_t		node_offset;	/* full offset spec */
	struct hammer_mount	*hmp;
	struct hammer_buffer	*buffer;	/* backing buffer */
	hammer_node_ondisk_t	ondisk;		/* ptr to on-disk structure */
	TAILQ_HEAD(, hammer_cursor) cursor_list; /* deadlock recovery */
	struct hammer_node_cache_list cache_list; /* passive caches */
	int			flags;
	int			cursor_exclreq_count;
};

#define HAMMER_NODE_DELETED	0x0001
#define HAMMER_NODE_FLUSH	0x0002
#define HAMMER_NODE_CRCGOOD	0x0004
#define HAMMER_NODE_NEEDSCRC	0x0008
#define HAMMER_NODE_NEEDSMIRROR	0x0010
#define HAMMER_NODE_CRCBAD	0x0020
#define HAMMER_NODE_NONLINEAR	0x0040		/* linear heuristic */

#define HAMMER_NODE_CRCANY	(HAMMER_NODE_CRCGOOD | HAMMER_NODE_CRCBAD)

typedef struct hammer_node	*hammer_node_t;

/*
 * List of locked nodes.  This structure is used to lock potentially large
 * numbers of nodes as an aid for complex B-Tree operations.
 */
struct hammer_node_lock;
TAILQ_HEAD(hammer_node_lock_list, hammer_node_lock);

struct hammer_node_lock {
	TAILQ_ENTRY(hammer_node_lock) entry;
	struct hammer_node_lock_list  list;
	struct hammer_node_lock	      *parent;
	hammer_node_t	node;
	hammer_node_ondisk_t copy;	/* copy of on-disk data */
	int		index;		/* index of this node in parent */
	int		count;		/* count children */
	int		flags;
};

typedef struct hammer_node_lock *hammer_node_lock_t;

#define HAMMER_NODE_LOCK_UPDATED	0x0001
#define HAMMER_NODE_LOCK_LCACHE		0x0002

/*
 * Common I/O management structure - embedded in in-memory structures
 * which are backed by filesystem buffers.
 */
union hammer_io_structure {
	struct hammer_io	io;
	struct hammer_volume	volume;
	struct hammer_buffer	buffer;
};

typedef union hammer_io_structure *hammer_io_structure_t;

/*
 * The reserve structure prevents the blockmap from allocating
 * out of a reserved bigblock.  Such reservations are used by
 * the direct-write mechanism.
 *
 * The structure is also used to hold off on reallocations of
 * big blocks from the freemap until flush dependencies have
 * been dealt with.
 */
struct hammer_reserve {
	RB_ENTRY(hammer_reserve) rb_node;
	TAILQ_ENTRY(hammer_reserve) delay_entry;
	int		flush_group;
	int		flags;
	int		refs;
	int		zone;
	int		append_off;
	int32_t		bytes_free;
	hammer_off_t	zone_offset;
};

typedef struct hammer_reserve *hammer_reserve_t;

#define HAMMER_RESF_ONDELAY	0x0001
#define HAMMER_RESF_LAYER2FREE	0x0002

#include "hammer_cursor.h"

/*
 * The undo structure tracks recent undos to avoid laying down duplicate
 * undos within a flush group, saving us a significant amount of overhead.
 *
 * This is strictly a heuristic.
 */
#define HAMMER_MAX_UNDOS	1024
#define HAMMER_MAX_FLUSHERS	4

struct hammer_undo {
	RB_ENTRY(hammer_undo)	rb_node;
	TAILQ_ENTRY(hammer_undo) lru_entry;
	hammer_off_t		offset;
	int			bytes;
};

typedef struct hammer_undo *hammer_undo_t;

struct hammer_flusher_info;
TAILQ_HEAD(hammer_flusher_info_list, hammer_flusher_info);

struct hammer_flusher {
	int		signal;		/* flusher thread sequencer */
	int		done;		/* last completed flush group */
	int		next;		/* next unallocated flg seqno */
	int		group_lock;	/* lock sequencing of the next flush */
	int		exiting;	/* request master exit */
	thread_t	td;		/* master flusher thread */
	hammer_tid_t	tid;		/* last flushed transaction id */
	int		finalize_want;		/* serialize finalization */
	struct hammer_lock finalize_lock;	/* serialize finalization */
	struct hammer_transaction trans;	/* shared transaction */
	struct hammer_flusher_info_list run_list;
	struct hammer_flusher_info_list ready_list;
};

#define HAMMER_FLUSH_UNDOS_RELAXED	0
#define HAMMER_FLUSH_UNDOS_FORCED	1
#define HAMMER_FLUSH_UNDOS_AUTO		2

/*
 * Internal hammer mount data structure
 */
struct hammer_mount {
	struct mount *mp;
	/*struct vnode *rootvp;*/
	struct hammer_ino_rb_tree rb_inos_root;
	struct hammer_redo_rb_tree rb_redo_root;
	struct hammer_vol_rb_tree rb_vols_root;
	struct hammer_nod_rb_tree rb_nods_root;
	struct hammer_und_rb_tree rb_undo_root;
	struct hammer_res_rb_tree rb_resv_root;
	struct hammer_buf_rb_tree rb_bufs_root;
	struct hammer_pfs_rb_tree rb_pfsm_root;

	struct hammer_dedup_crc_rb_tree rb_dedup_crc_root;
	struct hammer_dedup_off_rb_tree rb_dedup_off_root;

	struct hammer_volume *rootvol;
	struct hammer_base_elm root_btree_beg;
	struct hammer_base_elm root_btree_end;

	struct malloc_type	*m_misc;
	struct malloc_type	*m_inodes;

	int	flags;		/* HAMMER_MOUNT_xxx flags */
	int	hflags;
	int	ronly;
	int	nvolumes;
	int	volume_iterator;
	int	master_id;	/* -1 or 0-15 - clustering and mirroring */
	int	version;	/* hammer filesystem version to use */
	int	rsv_inodes;	/* reserved space due to dirty inodes */
	int64_t	rsv_databytes;	/* reserved space due to record data */
	int	rsv_recs;	/* reserved space due to dirty records */
	int	rsv_fromdelay;	/* bigblocks reserved due to flush delay */
	int	undo_rec_limit;	/* based on size of undo area */
	int	last_newrecords;
	int	count_newrecords;

	int	volume_to_remove; /* volume that is currently being removed */

	int	count_inodes;	/* total number of inodes */
	int	count_iqueued;	/* inodes queued to flusher */
	int	count_reclaims;	/* inodes pending reclaim by flusher */

	struct hammer_flusher flusher;

	u_int	check_interrupt;
	u_int	check_yield;
	uuid_t	fsid;
	struct hammer_mod_rb_tree volu_root;	/* dirty volume headers */
	struct hammer_mod_rb_tree undo_root;	/* dirty undo buffers */
	struct hammer_mod_rb_tree data_root;	/* dirty data buffers */
	struct hammer_mod_rb_tree meta_root;	/* dirty meta bufs */
	struct hammer_mod_rb_tree lose_root;	/* loose buffers */
	long	locked_dirty_space;		/* meta/volu count */
	long	io_running_space;		/* io_token */
	int	unused01;
	int	objid_cache_count;
	int	dedup_cache_count;
	int	error;				/* critical I/O error */
	struct krate	krate;			/* rate limited kprintf */
	hammer_tid_t	asof;			/* snapshot mount */
	hammer_tid_t	next_tid;
	hammer_tid_t	flush_tid1;		/* flusher tid sequencing */
	hammer_tid_t	flush_tid2;		/* flusher tid sequencing */
	int64_t copy_stat_freebigblocks;	/* number of free bigblocks */
	u_int32_t	undo_seqno;		/* UNDO/REDO FIFO seqno */
	u_int32_t	recover_stage2_seqno;	/* REDO recovery seqno */
	hammer_off_t	recover_stage2_offset;	/* REDO recovery offset */

	struct netexport export;
	struct hammer_lock sync_lock;
	struct hammer_lock free_lock;
	struct hammer_lock undo_lock;
	struct hammer_lock blkmap_lock;
	struct hammer_lock snapshot_lock;
	struct hammer_lock volume_lock;
	struct hammer_blockmap  blockmap[HAMMER_MAX_ZONES];
	struct hammer_undo	undos[HAMMER_MAX_UNDOS];
	int			undo_alloc;
	TAILQ_HEAD(, hammer_undo)  undo_lru_list;
	TAILQ_HEAD(, hammer_reserve) delay_list;
	struct hammer_flush_group_list	flush_group_list;
	hammer_flush_group_t	fill_flush_group;
	hammer_flush_group_t	next_flush_group;
	TAILQ_HEAD(, hammer_objid_cache) objid_cache_list;
	TAILQ_HEAD(, hammer_dedup_cache) dedup_lru_list;
	hammer_dedup_cache_t	dedup_free_cache;
	TAILQ_HEAD(, hammer_reclaim) reclaim_list;
	TAILQ_HEAD(, hammer_io) iorun_list;

	struct lwkt_token	fs_token;	/* high level */
	struct lwkt_token	io_token;	/* low level (IO callback) */

	struct hammer_inostats	inostats[HAMMER_INOSTATS_HSIZE];
};

typedef struct hammer_mount	*hammer_mount_t;

#define HAMMER_MOUNT_CRITICAL_ERROR	0x0001
#define HAMMER_MOUNT_FLUSH_RECOVERY	0x0002
#define HAMMER_MOUNT_REDO_SYNC		0x0004
#define HAMMER_MOUNT_REDO_RECOVERY_REQ	0x0008
#define HAMMER_MOUNT_REDO_RECOVERY_RUN	0x0010

struct hammer_sync_info {
	int error;
	int waitfor;
};

/*
 * Minimum buffer cache bufs required to rebalance the B-Tree.
 * This is because we must hold the children and the children's children
 * locked.  Even this might not be enough if things are horribly out
 * of balance.
 */
#define HAMMER_REBALANCE_MIN_BUFS	\
	(HAMMER_BTREE_LEAF_ELMS * HAMMER_BTREE_LEAF_ELMS)
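
/*
 * Worked example (assuming HAMMER_BTREE_LEAF_ELMS is 63, per
 * hammer_disk.h): rebalancing may need to hold up to 63 children, each
 * with up to 63 children of their own, locked at once, i.e. on the
 * order of 63 * 63 = 3969 nodes, so at least that many buffer cache
 * buffers must be available.
 */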

#endif

/*
 * checkspace slop (8MB chunks), higher numbers are more conservative.
 */
#define HAMMER_CHKSPC_REBLOCK	25
#define HAMMER_CHKSPC_MIRROR	20
#define HAMMER_CHKSPC_WRITE	20
#define HAMMER_CHKSPC_CREATE	20
#define HAMMER_CHKSPC_REMOVE	10
#define HAMMER_CHKSPC_EMERGENCY	0
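
/*
 * Worked example (the 8MB chunk unit is from the comment above; the
 * exact formula lives in _hammer_checkspace()): a slop of 25 means
 * roughly 25 * 8MB = 200MB of free space must remain before a reblock
 * is allowed, making reblocking the most conservative operation and
 * emergency removal (slop 0) the least.
 */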

#if defined(_KERNEL)

extern struct vop_ops hammer_vnode_vops;
extern struct vop_ops hammer_spec_vops;
extern struct vop_ops hammer_fifo_vops;
extern struct bio_ops hammer_bioops;

extern int hammer_debug_io;
extern int hammer_debug_general;
extern int hammer_debug_debug;
extern int hammer_debug_inode;
extern int hammer_debug_locks;
extern int hammer_debug_btree;
extern int hammer_debug_tid;
extern int hammer_debug_recover;
extern int hammer_debug_recover_faults;
extern int hammer_debug_critical;
extern int hammer_cluster_enable;
extern int hammer_live_dedup;
extern int hammer_tdmux_ticks;
extern int hammer_count_fsyncs;
extern int hammer_count_inodes;
extern int hammer_count_iqueued;
extern int hammer_count_reclaims;
extern int hammer_count_records;
extern int hammer_count_record_datas;
extern int hammer_count_volumes;
extern int hammer_count_buffers;
extern int hammer_count_nodes;
extern int64_t hammer_count_extra_space_used;
extern int64_t hammer_stats_btree_lookups;
extern int64_t hammer_stats_btree_searches;
extern int64_t hammer_stats_btree_inserts;
extern int64_t hammer_stats_btree_deletes;
extern int64_t hammer_stats_btree_elements;
extern int64_t hammer_stats_btree_splits;
extern int64_t hammer_stats_btree_iterations;
extern int64_t hammer_stats_btree_root_iterations;
extern int64_t hammer_stats_record_iterations;
extern int64_t hammer_stats_file_read;
extern int64_t hammer_stats_file_write;
extern int64_t hammer_stats_file_iopsr;
extern int64_t hammer_stats_file_iopsw;
extern int64_t hammer_stats_disk_read;
extern int64_t hammer_stats_disk_write;
extern int64_t hammer_stats_inode_flushes;
extern int64_t hammer_stats_commits;
extern int64_t hammer_stats_undo;
extern int64_t hammer_stats_redo;
extern long hammer_count_dirtybufspace;
extern int hammer_count_refedbufs;
extern int hammer_count_reservations;
extern long hammer_count_io_running_read;
extern long hammer_count_io_running_write;
extern int hammer_count_io_locked;
extern long hammer_limit_dirtybufspace;
extern int hammer_limit_recs;
extern int hammer_limit_inode_recs;
extern int hammer_limit_reclaims;
extern int hammer_live_dedup_cache_size;
extern int hammer_limit_redo;
extern int hammer_bio_count;
extern int hammer_verify_zone;
extern int hammer_verify_data;
extern int hammer_write_mode;
extern int hammer_double_buffer;
extern int hammer_btree_full_undo;
extern int hammer_yield_check;
extern int hammer_fsync_mode;
extern int hammer_autoflush;
extern int64_t hammer_contention_count;

extern int64_t hammer_live_dedup_vnode_bcmps;
extern int64_t hammer_live_dedup_device_bcmps;
extern int64_t hammer_live_dedup_findblk_failures;
extern int64_t hammer_live_dedup_bmap_saves;

void	hammer_critical_error(hammer_mount_t hmp, hammer_inode_t ip,
			int error, const char *msg);
int	hammer_vop_inactive(struct vop_inactive_args *);
int	hammer_vop_reclaim(struct vop_reclaim_args *);
int	hammer_get_vnode(struct hammer_inode *ip, struct vnode **vpp);
struct hammer_inode *hammer_get_inode(hammer_transaction_t trans,
			hammer_inode_t dip, int64_t obj_id,
			hammer_tid_t asof, u_int32_t localization,
			int flags, int *errorp);
struct hammer_inode *hammer_get_dummy_inode(hammer_transaction_t trans,
			hammer_inode_t dip, int64_t obj_id,
			hammer_tid_t asof, u_int32_t localization,
			int flags, int *errorp);
struct hammer_inode *hammer_find_inode(hammer_transaction_t trans,
			int64_t obj_id, hammer_tid_t asof,
			u_int32_t localization);
void	hammer_scan_inode_snapshots(hammer_mount_t hmp,
			hammer_inode_info_t iinfo,
			int (*callback)(hammer_inode_t ip, void *data),
			void *data);
void	hammer_put_inode(struct hammer_inode *ip);
void	hammer_put_inode_ref(struct hammer_inode *ip);
void	hammer_inode_waitreclaims(hammer_transaction_t trans);
void	hammer_inode_dirty(struct hammer_inode *ip);

int	hammer_unload_volume(hammer_volume_t volume, void *data __unused);
int	hammer_adjust_volume_mode(hammer_volume_t volume, void *data __unused);

int	hammer_unload_buffer(hammer_buffer_t buffer, void *data);
int	hammer_install_volume(hammer_mount_t hmp, const char *volname,
			struct vnode *devvp);
int	hammer_mountcheck_volumes(hammer_mount_t hmp);

int	hammer_mem_add(hammer_record_t record);
int	hammer_ip_lookup(hammer_cursor_t cursor);
int	hammer_ip_first(hammer_cursor_t cursor);
int	hammer_ip_next(hammer_cursor_t cursor);
int	hammer_ip_resolve_data(hammer_cursor_t cursor);
int	hammer_ip_delete_record(hammer_cursor_t cursor, hammer_inode_t ip,
			hammer_tid_t tid);
int	hammer_create_at_cursor(hammer_cursor_t cursor,
			hammer_btree_leaf_elm_t leaf, void *udata, int mode);
int	hammer_delete_at_cursor(hammer_cursor_t cursor, int delete_flags,
			hammer_tid_t delete_tid, u_int32_t delete_ts,
			int track, int64_t *stat_bytes);
int	hammer_ip_check_directory_empty(hammer_transaction_t trans,
			hammer_inode_t ip);
int	hammer_sync_hmp(hammer_mount_t hmp, int waitfor);
int	hammer_queue_inodes_flusher(hammer_mount_t hmp, int waitfor);

hammer_record_t
	hammer_alloc_mem_record(hammer_inode_t ip, int data_len);
void	hammer_flush_record_done(hammer_record_t record, int error);
void	hammer_wait_mem_record_ident(hammer_record_t record, const char *ident);
void	hammer_rel_mem_record(hammer_record_t record);

int	hammer_cursor_up(hammer_cursor_t cursor);
int	hammer_cursor_up_locked(hammer_cursor_t cursor);
int	hammer_cursor_down(hammer_cursor_t cursor);
int	hammer_cursor_upgrade(hammer_cursor_t cursor);
int	hammer_cursor_upgrade_node(hammer_cursor_t cursor);
void	hammer_cursor_downgrade(hammer_cursor_t cursor);
int	hammer_cursor_upgrade2(hammer_cursor_t c1, hammer_cursor_t c2);
void	hammer_cursor_downgrade2(hammer_cursor_t c1, hammer_cursor_t c2);
int	hammer_cursor_seek(hammer_cursor_t cursor, hammer_node_t node,
			int index);
void	hammer_lock_ex_ident(struct hammer_lock *lock, const char *ident);
int	hammer_lock_ex_try(struct hammer_lock *lock);
void	hammer_lock_sh(struct hammer_lock *lock);
int	hammer_lock_sh_try(struct hammer_lock *lock);
int	hammer_lock_upgrade(struct hammer_lock *lock, int shcount);
void	hammer_lock_downgrade(struct hammer_lock *lock, int shcount);
int	hammer_lock_status(struct hammer_lock *lock);
void	hammer_unlock(struct hammer_lock *lock);
void	hammer_ref(struct hammer_lock *lock);
int	hammer_ref_interlock(struct hammer_lock *lock);
int	hammer_ref_interlock_true(struct hammer_lock *lock);
void	hammer_ref_interlock_done(struct hammer_lock *lock);
void	hammer_rel(struct hammer_lock *lock);
int	hammer_rel_interlock(struct hammer_lock *lock, int locked);
void	hammer_rel_interlock_done(struct hammer_lock *lock, int orig_locked);
int	hammer_get_interlock(struct hammer_lock *lock);
int	hammer_try_interlock_norefs(struct hammer_lock *lock);
void	hammer_put_interlock(struct hammer_lock *lock, int error);

void	hammer_sync_lock_ex(hammer_transaction_t trans);
void	hammer_sync_lock_sh(hammer_transaction_t trans);
int	hammer_sync_lock_sh_try(hammer_transaction_t trans);
void	hammer_sync_unlock(hammer_transaction_t trans);

u_int32_t hammer_to_unix_xid(uuid_t *uuid);
void	hammer_guid_to_uuid(uuid_t *uuid, u_int32_t guid);
void	hammer_time_to_timespec(u_int64_t xtime, struct timespec *ts);
u_int64_t hammer_timespec_to_time(struct timespec *ts);
int	hammer_str_to_tid(const char *str, int *ispfsp,
			hammer_tid_t *tidp, u_int32_t *localizationp);
int	hammer_is_atatext(const char *name, int len);
hammer_tid_t hammer_alloc_objid(hammer_mount_t hmp, hammer_inode_t dip,
			int64_t namekey);
void	hammer_clear_objid(hammer_inode_t dip);
void	hammer_destroy_objid_cache(hammer_mount_t hmp);

int	hammer_dedup_crc_rb_compare(hammer_dedup_cache_t dc1,
			hammer_dedup_cache_t dc2);
int	hammer_dedup_off_rb_compare(hammer_dedup_cache_t dc1,
			hammer_dedup_cache_t dc2);
hammer_dedup_cache_t hammer_dedup_cache_add(hammer_inode_t ip,
			hammer_btree_leaf_elm_t leaf);
hammer_dedup_cache_t hammer_dedup_cache_lookup(hammer_mount_t hmp,
			hammer_crc_t crc);
void	hammer_dedup_cache_inval(hammer_mount_t hmp, hammer_off_t base_offset);
void	hammer_destroy_dedup_cache(hammer_mount_t hmp);
void	hammer_dump_dedup_cache(hammer_mount_t hmp);
int	hammer_dedup_validate(hammer_dedup_cache_t dcp, int zone, int bytes,
			void *data);

int	hammer_enter_undo_history(hammer_mount_t hmp, hammer_off_t offset,
			int bytes);
void	hammer_clear_undo_history(hammer_mount_t hmp);
enum vtype hammer_get_vnode_type(u_int8_t obj_type);
int	hammer_get_dtype(u_int8_t obj_type);
u_int8_t hammer_get_obj_type(enum vtype vtype);
int64_t hammer_directory_namekey(hammer_inode_t dip, const void *name, int len,
			u_int32_t *max_iterationsp);
int	hammer_nohistory(hammer_inode_t ip);

int	hammer_init_cursor(hammer_transaction_t trans, hammer_cursor_t cursor,
			hammer_node_cache_t cache, hammer_inode_t ip);
void	hammer_normalize_cursor(hammer_cursor_t cursor);
void	hammer_done_cursor(hammer_cursor_t cursor);
int	hammer_recover_cursor(hammer_cursor_t cursor);
void	hammer_unlock_cursor(hammer_cursor_t cursor);
int	hammer_lock_cursor(hammer_cursor_t cursor);
hammer_cursor_t	hammer_push_cursor(hammer_cursor_t ocursor);
void	hammer_pop_cursor(hammer_cursor_t ocursor, hammer_cursor_t ncursor);

void	hammer_cursor_replaced_node(hammer_node_t onode, hammer_node_t nnode);
void	hammer_cursor_removed_node(hammer_node_t onode, hammer_node_t parent,
			int index);
void	hammer_cursor_split_node(hammer_node_t onode, hammer_node_t nnode,
			int index);
void	hammer_cursor_moved_element(hammer_node_t oparent, int pindex,
			hammer_node_t onode, int oindex,
			hammer_node_t nnode, int nindex);
void	hammer_cursor_parent_changed(hammer_node_t node, hammer_node_t oparent,
			hammer_node_t nparent, int nindex);
void	hammer_cursor_inserted_element(hammer_node_t node, int index);
void	hammer_cursor_deleted_element(hammer_node_t node, int index);
void	hammer_cursor_invalidate_cache(hammer_cursor_t cursor);

int	hammer_btree_lookup(hammer_cursor_t cursor);
int	hammer_btree_first(hammer_cursor_t cursor);
int	hammer_btree_last(hammer_cursor_t cursor);
int	hammer_btree_extract(hammer_cursor_t cursor, int flags);
int	hammer_btree_iterate(hammer_cursor_t cursor);
int	hammer_btree_iterate_reverse(hammer_cursor_t cursor);
int	hammer_btree_insert(hammer_cursor_t cursor,
			hammer_btree_leaf_elm_t elm, int *doprop);
int	hammer_btree_delete(hammer_cursor_t cursor);
void	hammer_btree_do_propagation(hammer_cursor_t cursor,
			hammer_pseudofs_inmem_t pfsm,
			hammer_btree_leaf_elm_t leaf);
int	hammer_btree_cmp(hammer_base_elm_t key1, hammer_base_elm_t key2);
int	hammer_btree_chkts(hammer_tid_t ts, hammer_base_elm_t key);
int	hammer_btree_correct_rhb(hammer_cursor_t cursor, hammer_tid_t tid);
int	hammer_btree_correct_lhb(hammer_cursor_t cursor, hammer_tid_t tid);

int	btree_set_parent(hammer_transaction_t trans, hammer_node_t node,
			hammer_btree_elm_t elm);
void	hammer_node_lock_init(hammer_node_lock_t parent, hammer_node_t node);
void	hammer_btree_lcache_init(hammer_mount_t hmp, hammer_node_lock_t lcache,
			int depth);
void	hammer_btree_lcache_free(hammer_mount_t hmp, hammer_node_lock_t lcache);
int	hammer_btree_lock_children(hammer_cursor_t cursor, int depth,
			hammer_node_lock_t parent,
			hammer_node_lock_t lcache);
void	hammer_btree_lock_copy(hammer_cursor_t cursor,
			hammer_node_lock_t parent);
int	hammer_btree_sync_copy(hammer_cursor_t cursor,
			hammer_node_lock_t parent);
void	hammer_btree_unlock_children(hammer_mount_t hmp,
			hammer_node_lock_t parent,
			hammer_node_lock_t lcache);
int	hammer_btree_search_node(hammer_base_elm_t elm, hammer_node_ondisk_t node);
hammer_node_t hammer_btree_get_parent(hammer_transaction_t trans,
			hammer_node_t node, int *parent_indexp,
			int *errorp, int try_exclusive);

void	hammer_print_btree_node(hammer_node_ondisk_t ondisk);
void	hammer_print_btree_elm(hammer_btree_elm_t elm, u_int8_t type, int i);

void	*hammer_bread(struct hammer_mount *hmp, hammer_off_t off,
			int *errorp, struct hammer_buffer **bufferp);
void	*hammer_bnew(struct hammer_mount *hmp, hammer_off_t off,
			int *errorp, struct hammer_buffer **bufferp);
void	*hammer_bread_ext(struct hammer_mount *hmp, hammer_off_t off, int bytes,
			int *errorp, struct hammer_buffer **bufferp);
void	*hammer_bnew_ext(struct hammer_mount *hmp, hammer_off_t off, int bytes,
			int *errorp, struct hammer_buffer **bufferp);

hammer_volume_t	hammer_get_root_volume(hammer_mount_t hmp, int *errorp);

hammer_volume_t	hammer_get_volume(hammer_mount_t hmp,
			int32_t vol_no, int *errorp);
hammer_buffer_t	hammer_get_buffer(hammer_mount_t hmp, hammer_off_t buf_offset,
			int bytes, int isnew, int *errorp);
void		hammer_sync_buffers(hammer_mount_t hmp,
			hammer_off_t base_offset, int bytes);
int		hammer_del_buffers(hammer_mount_t hmp,
			hammer_off_t base_offset,
			hammer_off_t zone2_offset, int bytes,
			int report_conflicts);

int		hammer_ref_volume(hammer_volume_t volume);
int		hammer_ref_buffer(hammer_buffer_t buffer);
void		hammer_flush_buffer_nodes(hammer_buffer_t buffer);

void		hammer_rel_volume(hammer_volume_t volume, int locked);
void		hammer_rel_buffer(hammer_buffer_t buffer, int locked);

int		hammer_vfs_export(struct mount *mp, int op,
			const struct export_args *export);
hammer_node_t	hammer_get_node(hammer_transaction_t trans,
			hammer_off_t node_offset, int isnew, int *errorp);
void		hammer_ref_node(hammer_node_t node);
hammer_node_t	hammer_ref_node_safe(hammer_transaction_t trans,
			hammer_node_cache_t cache, int *errorp);
void		hammer_rel_node(hammer_node_t node);
void		hammer_delete_node(hammer_transaction_t trans,
			hammer_node_t node);
void		hammer_cache_node(hammer_node_cache_t cache,
			hammer_node_t node);
void		hammer_uncache_node(hammer_node_cache_t cache);
void		hammer_flush_node(hammer_node_t node, int locked);

void hammer_dup_buffer(struct hammer_buffer **bufferp,
			struct hammer_buffer *buffer);
hammer_node_t hammer_alloc_btree(hammer_transaction_t trans,
			hammer_off_t hint, int *errorp);
void *hammer_alloc_data(hammer_transaction_t trans, int32_t data_len,
			u_int16_t rec_type, hammer_off_t *data_offsetp,
			struct hammer_buffer **data_bufferp,
			hammer_off_t hint, int *errorp);

int hammer_generate_undo(hammer_transaction_t trans,
			hammer_off_t zone1_offset, void *base, int len);
int hammer_generate_redo(hammer_transaction_t trans, hammer_inode_t ip,
			hammer_off_t file_offset, u_int32_t flags,
			void *base, int len);
void hammer_generate_redo_sync(hammer_transaction_t trans);
void hammer_redo_fifo_start_flush(hammer_inode_t ip);
void hammer_redo_fifo_end_flush(hammer_inode_t ip);

void hammer_format_undo(void *base, u_int32_t seqno);
int hammer_upgrade_undo_4(hammer_transaction_t trans);

void hammer_put_volume(struct hammer_volume *volume, int flush);
void hammer_put_buffer(struct hammer_buffer *buffer, int flush);

hammer_off_t hammer_freemap_alloc(hammer_transaction_t trans,
			hammer_off_t owner, int *errorp);
void hammer_freemap_free(hammer_transaction_t trans, hammer_off_t phys_offset,
			hammer_off_t owner, int *errorp);
int _hammer_checkspace(hammer_mount_t hmp, int slop, int64_t *resp);
hammer_off_t hammer_blockmap_alloc(hammer_transaction_t trans, int zone,
			int bytes, hammer_off_t hint, int *errorp);
hammer_reserve_t hammer_blockmap_reserve(hammer_mount_t hmp, int zone,
			int bytes, hammer_off_t *zone_offp, int *errorp);
hammer_reserve_t hammer_blockmap_reserve_dedup(hammer_mount_t hmp, int zone,
			int bytes, hammer_off_t zone_offset, int *errorp);
void hammer_blockmap_reserve_complete(hammer_mount_t hmp,
			hammer_reserve_t resv);
void hammer_reserve_clrdelay(hammer_mount_t hmp, hammer_reserve_t resv);
void hammer_blockmap_free(hammer_transaction_t trans,
			hammer_off_t bmap_off, int bytes);
int hammer_blockmap_dedup(hammer_transaction_t trans,
			hammer_off_t bmap_off, int bytes);
int hammer_blockmap_finalize(hammer_transaction_t trans,
			hammer_reserve_t resv,
			hammer_off_t bmap_off, int bytes);
int hammer_blockmap_getfree(hammer_mount_t hmp, hammer_off_t bmap_off,
			int *curp, int *errorp);
hammer_off_t hammer_blockmap_lookup(hammer_mount_t hmp, hammer_off_t bmap_off,
			int *errorp);
hammer_off_t hammer_undo_lookup(hammer_mount_t hmp, hammer_off_t bmap_off,
			int *errorp);
int64_t hammer_undo_used(hammer_transaction_t trans);
int64_t hammer_undo_space(hammer_transaction_t trans);
int64_t hammer_undo_max(hammer_mount_t hmp);
int hammer_undo_reclaim(hammer_io_t io);

void hammer_start_transaction(struct hammer_transaction *trans,
			struct hammer_mount *hmp);
void hammer_simple_transaction(struct hammer_transaction *trans,
			struct hammer_mount *hmp);
void hammer_start_transaction_fls(struct hammer_transaction *trans,
			struct hammer_mount *hmp);
void hammer_done_transaction(struct hammer_transaction *trans);
hammer_tid_t hammer_alloc_tid(hammer_mount_t hmp, int count);

void hammer_modify_inode(hammer_transaction_t trans, hammer_inode_t ip, int flags);
void hammer_flush_inode(hammer_inode_t ip, int flags);
void hammer_flush_inode_done(hammer_inode_t ip, int error);
void hammer_wait_inode(hammer_inode_t ip);

int  hammer_create_inode(struct hammer_transaction *trans, struct vattr *vap,
			struct ucred *cred, struct hammer_inode *dip,
			const char *name, int namelen,
			hammer_pseudofs_inmem_t pfsm,
			struct hammer_inode **ipp);
void hammer_rel_inode(hammer_inode_t ip, int flush);
int hammer_reload_inode(hammer_inode_t ip, void *arg __unused);
int hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2);
int hammer_redo_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2);
int hammer_destroy_inode_callback(hammer_inode_t ip, void *data __unused);

int hammer_sync_inode(hammer_transaction_t trans, hammer_inode_t ip);
void hammer_test_inode(hammer_inode_t dip);
void hammer_inode_unloadable_check(hammer_inode_t ip, int getvp);
int hammer_update_atime_quick(hammer_inode_t ip);

int  hammer_ip_add_directory(struct hammer_transaction *trans,
			hammer_inode_t dip, const char *name, int bytes,
			hammer_inode_t nip);
int  hammer_ip_del_directory(struct hammer_transaction *trans,
			hammer_cursor_t cursor, hammer_inode_t dip,
			hammer_inode_t ip);
void hammer_ip_replace_bulk(hammer_mount_t hmp, hammer_record_t record);
hammer_record_t hammer_ip_add_bulk(hammer_inode_t ip, off_t file_offset,
			void *data, int bytes, int *errorp);
int  hammer_ip_frontend_trunc(struct hammer_inode *ip, off_t file_size);
int  hammer_ip_add_record(struct hammer_transaction *trans,
			hammer_record_t record);
int  hammer_ip_delete_range(hammer_cursor_t cursor, hammer_inode_t ip,
			int64_t ran_beg, int64_t ran_end, int truncating);
int  hammer_ip_delete_clean(hammer_cursor_t cursor, hammer_inode_t ip,
			int *countp);
int  hammer_ip_sync_data(hammer_cursor_t cursor, hammer_inode_t ip,
			int64_t offset, void *data, int bytes);
int  hammer_ip_sync_record(hammer_transaction_t trans, hammer_record_t rec);
int  hammer_ip_sync_record_cursor(hammer_cursor_t cursor, hammer_record_t rec);
hammer_pseudofs_inmem_t hammer_load_pseudofs(hammer_transaction_t trans,
			u_int32_t localization, int *errorp);
int  hammer_mkroot_pseudofs(hammer_transaction_t trans, struct ucred *cred,
			hammer_pseudofs_inmem_t pfsm);
int  hammer_save_pseudofs(hammer_transaction_t trans,
			hammer_pseudofs_inmem_t pfsm);
int  hammer_unload_pseudofs(hammer_transaction_t trans, u_int32_t localization);
void hammer_rel_pseudofs(hammer_mount_t hmp, hammer_pseudofs_inmem_t pfsm);
int hammer_ioctl(hammer_inode_t ip, u_long com, caddr_t data, int fflag,
			struct ucred *cred);

void hammer_io_init(hammer_io_t io, hammer_volume_t volume,
			enum hammer_io_type type);
int hammer_io_read(struct vnode *devvp, struct hammer_io *io, int limit);
void hammer_io_advance(struct hammer_io *io);
int hammer_io_new(struct vnode *devvp, struct hammer_io *io);
int hammer_io_inval(hammer_volume_t volume, hammer_off_t zone2_offset);
struct buf *hammer_io_release(struct hammer_io *io, int flush);
void hammer_io_flush(struct hammer_io *io, int reclaim);
void hammer_io_wait(struct hammer_io *io);
void hammer_io_waitdep(struct hammer_io *io);
void hammer_io_wait_all(hammer_mount_t hmp, const char *ident, int doflush);
int hammer_io_direct_read(hammer_mount_t hmp, struct bio *bio,
			hammer_btree_leaf_elm_t leaf);
int hammer_io_indirect_read(hammer_mount_t hmp, struct bio *bio,
			hammer_btree_leaf_elm_t leaf);
int hammer_io_direct_write(hammer_mount_t hmp, struct bio *bio,
			hammer_record_t record);
void hammer_io_direct_wait(hammer_record_t record);
void hammer_io_direct_uncache(hammer_mount_t hmp, hammer_btree_leaf_elm_t leaf);
void hammer_io_write_interlock(hammer_io_t io);
void hammer_io_done_interlock(hammer_io_t io);
void hammer_io_clear_modify(struct hammer_io *io, int inval);
void hammer_io_clear_modlist(struct hammer_io *io);
void hammer_io_flush_sync(hammer_mount_t hmp);
void hammer_io_clear_error(struct hammer_io *io);
void hammer_io_clear_error_noassert(struct hammer_io *io);
void hammer_io_notmeta(hammer_buffer_t buffer);
void hammer_io_limit_backlog(hammer_mount_t hmp);

void hammer_modify_volume(hammer_transaction_t trans, hammer_volume_t volume,
			void *base, int len);
void hammer_modify_buffer(hammer_transaction_t trans, hammer_buffer_t buffer,
			void *base, int len);
void hammer_modify_volume_done(hammer_volume_t volume);
void hammer_modify_buffer_done(hammer_buffer_t buffer);

int hammer_ioc_reblock(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_reblock *reblock);
int hammer_ioc_rebalance(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_rebalance *rebal);
int hammer_ioc_prune(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_prune *prune);
int hammer_ioc_mirror_read(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_mirror_rw *mirror);
int hammer_ioc_mirror_write(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_mirror_rw *mirror);
int hammer_ioc_set_pseudofs(hammer_transaction_t trans, hammer_inode_t ip,
			struct ucred *cred, struct hammer_ioc_pseudofs_rw *pfs);
int hammer_ioc_get_pseudofs(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_pseudofs_rw *pfs);
int hammer_ioc_destroy_pseudofs(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_pseudofs_rw *pfs);
int hammer_ioc_downgrade_pseudofs(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_pseudofs_rw *pfs);
int hammer_ioc_upgrade_pseudofs(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_pseudofs_rw *pfs);
int hammer_ioc_wait_pseudofs(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_pseudofs_rw *pfs);
int hammer_ioc_volume_add(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_volume *ioc);
int hammer_ioc_volume_del(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_volume *ioc);
int hammer_ioc_volume_list(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_volume_list *ioc);
int hammer_ioc_dedup(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_dedup *dedup);

int hammer_signal_check(hammer_mount_t hmp);

void hammer_flusher_create(hammer_mount_t hmp);
void hammer_flusher_destroy(hammer_mount_t hmp);
void hammer_flusher_sync(hammer_mount_t hmp);
int  hammer_flusher_async(hammer_mount_t hmp, hammer_flush_group_t flg);
1501 int hammer_flusher_async_one(hammer_mount_t hmp);
1502 int hammer_flusher_running(hammer_mount_t hmp);
1503 void hammer_flusher_wait(hammer_mount_t hmp, int seq);
1504 void hammer_flusher_wait_next(hammer_mount_t hmp);
1505 int hammer_flusher_meta_limit(hammer_mount_t hmp);
1506 int hammer_flusher_meta_halflimit(hammer_mount_t hmp);
1507 int hammer_flusher_undo_exhausted(hammer_transaction_t trans, int quarter);
1508 void hammer_flusher_clean_loose_ios(hammer_mount_t hmp);
1509 void hammer_flusher_finalize(hammer_transaction_t trans, int final);
1510 int hammer_flusher_haswork(hammer_mount_t hmp);
1511 void hammer_flusher_flush_undos(hammer_mount_t hmp, int already_flushed);
1512
1513 int hammer_recover_stage1(hammer_mount_t hmp, hammer_volume_t rootvol);
1514 int hammer_recover_stage2(hammer_mount_t hmp, hammer_volume_t rootvol);
1515 void hammer_recover_flush_buffers(hammer_mount_t hmp,
1516 hammer_volume_t root_volume, int final);
1517
1518 void hammer_crc_set_blockmap(hammer_blockmap_t blockmap);
1519 void hammer_crc_set_volume(hammer_volume_ondisk_t ondisk);
1520 void hammer_crc_set_leaf(void *data, hammer_btree_leaf_elm_t leaf);
1521
1522 int hammer_crc_test_blockmap(hammer_blockmap_t blockmap);
1523 int hammer_crc_test_volume(hammer_volume_ondisk_t ondisk);
1524 int hammer_crc_test_btree(hammer_node_ondisk_t ondisk);
1525 int hammer_crc_test_leaf(void *data, hammer_btree_leaf_elm_t leaf);
1526 void hkprintf(const char *ctl, ...) __printflike(1, 2);
1527 udev_t hammer_fsid_to_udev(uuid_t *uuid);
1528
1529
1530 int hammer_blocksize(int64_t file_offset);
1531 int hammer_blockoff(int64_t file_offset);
1532 int64_t hammer_blockdemarc(int64_t file_offset1, int64_t file_offset2);
1533
/*
 * Shortcut for _hammer_checkspace(), used all over the code.
 */
static __inline int
hammer_checkspace(hammer_mount_t hmp, int slop)
{
	return(_hammer_checkspace(hmp, slop, NULL));
}
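
/*
 * Example (sketch, not from the source): a front-end operation would
 * typically verify free space before building a transaction, returning
 * ENOSPC on failure.  The HAMMER_CHKSPC_* slop constant shown here is
 * assumed to be one of those defined elsewhere in this header.
 *
 *	if (hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE) != 0)
 *		return (ENOSPC);
 */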

#endif

#ifdef _KERNEL
static __inline void
hammer_wait_mem_record(hammer_record_t record)
{
	hammer_wait_mem_record_ident(record, "hmmwai");
}

static __inline void
hammer_lock_ex(struct hammer_lock *lock)
{
	hammer_lock_ex_ident(lock, "hmrlck");
}

/*
 * Indicate that a B-Tree node is being modified.
 */
static __inline void
hammer_modify_node_noundo(hammer_transaction_t trans, hammer_node_t node)
{
	KKASSERT((node->flags & HAMMER_NODE_CRCBAD) == 0);
	hammer_modify_buffer(trans, node->buffer, NULL, 0);
}

static __inline void
hammer_modify_node_all(hammer_transaction_t trans, struct hammer_node *node)
{
	KKASSERT((node->flags & HAMMER_NODE_CRCBAD) == 0);
	hammer_modify_buffer(trans, node->buffer,
			     node->ondisk, sizeof(*node->ondisk));
}

static __inline void
hammer_modify_node(hammer_transaction_t trans, hammer_node_t node,
		   void *base, int len)
{
	hammer_crc_t *crcptr;

	KKASSERT((char *)base >= (char *)node->ondisk &&
		 (char *)base + len <=
		 (char *)node->ondisk + sizeof(*node->ondisk));
	KKASSERT((node->flags & HAMMER_NODE_CRCBAD) == 0);

	if (hammer_btree_full_undo) {
		hammer_modify_node_all(trans, node);
	} else {
		hammer_modify_buffer(trans, node->buffer, base, len);
		crcptr = &node->ondisk->crc;
		hammer_modify_buffer(trans, node->buffer,
				     crcptr, sizeof(hammer_crc_t));
		--node->buffer->io.modify_refs;	/* only want one ref */
	}
}
/*
 * Indicate that the specified modifications have been completed.
 *
 * Do not try to generate the crc here; doing so is very expensive,
 * and a sequence of insertions or deletions can result in many calls
 * to this function on the same node.
 */
static __inline void
hammer_modify_node_done(hammer_node_t node)
{
	node->flags |= HAMMER_NODE_CRCGOOD;
	if ((node->flags & HAMMER_NODE_NEEDSCRC) == 0) {
		node->flags |= HAMMER_NODE_NEEDSCRC;
		node->buffer->io.gencrc = 1;
		hammer_ref_node(node);
	}
	hammer_modify_buffer_done(node->buffer);
}
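
/*
 * Example (sketch, not from the source): the usual pattern when a
 * caller changes part of a B-Tree node.  hammer_modify_node() logs
 * undo for just the range being touched, the caller mutates it, and
 * hammer_modify_node_done() releases the buffer modify interlock and
 * schedules the crc regeneration.  The element expression below is
 * illustrative only.
 *
 *	hammer_modify_node(trans, node, &elm->leaf, sizeof(elm->leaf));
 *	elm->leaf = cursor->leaf;
 *	hammer_modify_node_done(node);
 */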
#endif

#define hammer_modify_volume_field(trans, vol, field)		\
	hammer_modify_volume(trans, vol, &(vol)->ondisk->field,	\
			     sizeof((vol)->ondisk->field))

#define hammer_modify_node_field(trans, node, field)		\
	hammer_modify_node(trans, node, &(node)->ondisk->field,	\
			   sizeof((node)->ondisk->field))
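
/*
 * Example (sketch, not from the source): the field macros wrap undo
 * generation for a single on-disk field.  The field name below is
 * illustrative; any member of the ondisk structure works.
 *
 *	hammer_modify_volume_field(trans, root_volume,
 *				   vol0_stat_freebigblocks);
 *	--root_volume->ondisk->vol0_stat_freebigblocks;
 *	hammer_modify_volume_done(root_volume);
 */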

#ifdef _KERNEL
/*
 * The HAMMER_INODE_CAP_DIR_LOCAL_INO capability is set on newly
 * created directories for HAMMER version 2 or greater and causes
 * directory entries to be placed in the inode localization zone in
 * the B-Tree instead of the misc zone.
 *
 * This greatly improves locality of reference between directory
 * entries and inodes.
 */
static __inline u_int32_t
hammer_dir_localization(hammer_inode_t dip)
{
	if (dip->ino_data.cap_flags & HAMMER_INODE_CAP_DIR_LOCAL_INO)
		return(HAMMER_LOCALIZE_INODE);
	else
		return(HAMMER_LOCALIZE_MISC);
}
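
/*
 * Example (sketch, not from the source): a directory-entry lookup
 * would combine the directory's pseudo-fs localization with the zone
 * selected here when forming a B-Tree search key.
 *
 *	cursor.key_beg.localization = dip->obj_localization +
 *				      hammer_dir_localization(dip);
 */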
#endif