FreeBSD/Linux Kernel Cross Reference
sys/fs/reiserfs/journal.c


    1 /*
    2 ** Write ahead logging implementation copyright Chris Mason 2000
    3 **
    4 ** The background commits make this code very interrelated, and
    5 ** overly complex.  I need to rethink things a bit.... The major players:
    6 **
    7 ** journal_begin -- call with the number of blocks you expect to log.  
    8 **                  If the current transaction is too
    9 **                  old, it will block until the current transaction is 
   10 **                  finished, and then start a new one.
   11 **                  Usually, your transaction will get joined in with 
   12 **                  previous ones for speed.
   13 **
   14 ** journal_join  -- same as journal_begin, but won't block on the current 
   15 **                  transaction regardless of age.  Don't ever call
   16 **                  this.  Ever.  There are only two places it should be 
   17 **                  called from, and they are both inside this file.
   18 **
   19 ** journal_mark_dirty -- adds blocks into this transaction.  clears any flags 
   20 **                       that might make them get sent to disk
   21 **                       and then marks them BH_JDirty.  Puts the buffer head 
   22 **                       into the current transaction hash.  
   23 **
   24 ** journal_end -- if the current transaction is batchable, it does nothing.
   25 **                   Otherwise, it could do an async/synchronous commit, or
   26 **                   a full flush of all log and real blocks in the 
   27 **                   transaction.
   28 **
   29 ** flush_old_commits -- if the current transaction is too old, it is ended and 
   30 **                      commit blocks are sent to disk.  Forces commit blocks 
   31 **                      to disk for all backgrounded commits that have been 
   32 **                      around too long.
   33 **                   -- Note, if you call this as an immediate flush from
   34 **                      within kupdate, it will ignore the immediate flag
   35 **
   36 ** The commit thread -- a writer process for async commits.  It allows
   37 **                      a process to request a log flush on a task queue.
   38 **                      The commit will happen once the commit thread wakes up.
   39 **                      The benefit here is the writer (with whatever
   40 **                      related locks it has) doesn't have to wait for the
   41 **                      log blocks to hit disk if it doesn't want to.
   42 */
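
As a rough sketch of how these pieces fit together: a typical caller follows the begin/mark/end pattern below, mirroring the sequence do_journal_release() uses later in this file.  The helper name and block count here are illustrative only, not part of this file:

  static int log_one_buffer(struct super_block *sb, struct buffer_head *bh)
  {
    struct reiserfs_transaction_handle th ;
    int err ;

    /* reserve log space; this may join an existing transaction */
    err = journal_begin(&th, sb, 1) ;
    if (err)
      return err ;

    /* tell the journal this buffer is about to be changed */
    reiserfs_prepare_for_journal(sb, bh, 1) ;

    /* ... modify bh->b_data here ... */

    /* hash the buffer into the current transaction */
    journal_mark_dirty(&th, sb, bh) ;

    /* batchable end; the commit may happen now or be left for later */
    return journal_end(&th, sb, 1) ;
  }
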
   43 
   44 #include <linux/config.h>
   45 #include <asm/uaccess.h>
   46 #include <asm/system.h>
   47 
   48 #include <linux/sched.h>
   49 #include <asm/semaphore.h>
   50 
   51 #include <linux/vmalloc.h>
   52 #include <linux/reiserfs_fs.h>
   53 
   54 #include <linux/kernel.h>
   55 #include <linux/errno.h>
   56 #include <linux/fcntl.h>
   57 #include <linux/locks.h>
   58 #include <linux/stat.h>
   59 #include <linux/string.h>
   60 #include <linux/smp_lock.h>
   61 
   62 /* the number of mounted filesystems.  This is used to decide when to
   63 ** start and kill the commit thread
   64 */
   65 static int reiserfs_mounted_fs_count = 0 ;
   66 
   67 /* wake this up when you add something to the commit thread task queue */
   68 DECLARE_WAIT_QUEUE_HEAD(reiserfs_commit_thread_wait) ;
   69 
   70 /* wait on this if you need to be sure your task queue entries have been run */
   71 static DECLARE_WAIT_QUEUE_HEAD(reiserfs_commit_thread_done) ;
   72 DECLARE_TASK_QUEUE(reiserfs_commit_thread_tq) ;
   73 
   74 #define JOURNAL_TRANS_HALF 1018   /* must be correct to keep the desc and commit
   75                                      structs at 4k */
   76 #define BUFNR 64 /*read ahead */
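
To see where 1018 comes from, assuming the struct layout in reiserfs_fs.h: the description block packs three __u32 header fields, JOURNAL_TRANS_HALF __u32 real-block entries, and a 12-byte magic, so 3*4 + 1018*4 + 12 = 4096, exactly one 4k block.  The commit block carries the second half of the real-block list, hence the HALF in the name.
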
   77 
   78 /* cnode stat bits.  Move these into reiserfs_fs.h */
   79 
   80 #define BLOCK_FREED 2           /* this block was freed, and can't be written.  */
   81 #define BLOCK_FREED_HOLDER 3    /* this block was freed during this transaction, and can't be written */
   82 
   83 #define BLOCK_NEEDS_FLUSH 4     /* used in flush_journal_list */
   84 
   85 /* flags for do_journal_end */
   86 #define FLUSH_ALL   1           /* flush commit and real blocks */
   87 #define COMMIT_NOW  2           /* end and commit this transaction */
   88 #define WAIT        4           /* wait for the log blocks to hit the disk*/
   89 
   90 /* state bits for the journal */
   91 #define WRITERS_BLOCKED 1      /* set when new writers not allowed */
   92 
   93 static int do_journal_end(struct reiserfs_transaction_handle *,struct super_block *,unsigned long nblocks,int flags) ;
   94 static int flush_journal_list(struct super_block *s, struct reiserfs_journal_list *jl, int flushall) ;
   95 static int flush_commit_list(struct super_block *s, struct reiserfs_journal_list *jl, int flushall)  ;
   96 static int can_dirty(struct reiserfs_journal_cnode *cn) ;
   97 static int remove_from_journal_list(struct super_block *s, struct reiserfs_journal_list *jl, struct buffer_head *bh, int remove_freed);
   98 static int journal_join(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, unsigned long nblocks);
   99 static int release_journal_dev( struct super_block *super,
  100                                 struct reiserfs_journal *journal );
  101 static void init_journal_hash(struct super_block *p_s_sb) {
  102   memset(SB_JOURNAL(p_s_sb)->j_hash_table, 0, JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *)) ;
  103 }
  104 
  105 /*
  106 ** clears BH_Dirty and sticks the buffer on the clean list.  Called because I can't allow refile_buffer to
  107 ** make schedule happen after I've freed a block.  Look at remove_from_transaction and journal_mark_freed for
  108 ** more details.
  109 */
  110 static int reiserfs_clean_and_file_buffer(struct buffer_head *bh) {
  111   if (bh) {
  112     clear_bit(BH_Dirty, &bh->b_state) ;
  113     refile_buffer(bh) ;
  114   }
  115   return 0 ;
  116 }
  117 
  118 static struct reiserfs_bitmap_node *
  119 allocate_bitmap_node(struct super_block *p_s_sb) {
  120   struct reiserfs_bitmap_node *bn ;
  121   static int id = 0 ;
  122 
  123   bn = reiserfs_kmalloc(sizeof(struct reiserfs_bitmap_node), GFP_NOFS, p_s_sb) ;
  124   if (!bn) {
  125     return NULL ;
  126   }
  127   bn->data = reiserfs_kmalloc(p_s_sb->s_blocksize, GFP_NOFS, p_s_sb) ;
  128   if (!bn->data) {
  129     reiserfs_kfree(bn, sizeof(struct reiserfs_bitmap_node), p_s_sb) ;
  130     return NULL ;
  131   }
  132   bn->id = id++ ;
  133   memset(bn->data, 0, p_s_sb->s_blocksize) ;
  134   INIT_LIST_HEAD(&bn->list) ;
  135   return bn ;
  136 }
  137 
  138 static struct reiserfs_bitmap_node *
  139 get_bitmap_node(struct super_block *p_s_sb) {
  140   struct reiserfs_bitmap_node *bn = NULL;
  141   struct list_head *entry = SB_JOURNAL(p_s_sb)->j_bitmap_nodes.next ;
  142 
  143   SB_JOURNAL(p_s_sb)->j_used_bitmap_nodes++ ;
  144 repeat:
  145 
  146   if(entry != &SB_JOURNAL(p_s_sb)->j_bitmap_nodes) {
  147     bn = list_entry(entry, struct reiserfs_bitmap_node, list) ;
  148     list_del(entry) ;
  149     memset(bn->data, 0, p_s_sb->s_blocksize) ;
  150     SB_JOURNAL(p_s_sb)->j_free_bitmap_nodes-- ;
  151     return bn ;
  152   }
  153   bn = allocate_bitmap_node(p_s_sb) ;
  154   if (!bn) {
  155     yield();
  156     goto repeat ;
  157   }
  158   return bn ;
  159 }
  160 static inline void free_bitmap_node(struct super_block *p_s_sb,
  161                                     struct reiserfs_bitmap_node *bn) {
  162   SB_JOURNAL(p_s_sb)->j_used_bitmap_nodes-- ;
  163   if (SB_JOURNAL(p_s_sb)->j_free_bitmap_nodes > REISERFS_MAX_BITMAP_NODES) {
  164     reiserfs_kfree(bn->data, p_s_sb->s_blocksize, p_s_sb) ;
  165     reiserfs_kfree(bn, sizeof(struct reiserfs_bitmap_node), p_s_sb) ;
  166   } else {
  167     list_add(&bn->list, &SB_JOURNAL(p_s_sb)->j_bitmap_nodes) ;
  168     SB_JOURNAL(p_s_sb)->j_free_bitmap_nodes++ ;
  169   }
  170 }
  171 
  172 static void allocate_bitmap_nodes(struct super_block *p_s_sb) {
  173   int i ;
  174   struct reiserfs_bitmap_node *bn = NULL ;
  175   for (i = 0 ; i < REISERFS_MIN_BITMAP_NODES ; i++) {
  176     bn = allocate_bitmap_node(p_s_sb) ;
  177     if (bn) {
  178       list_add(&bn->list, &SB_JOURNAL(p_s_sb)->j_bitmap_nodes) ;
  179       SB_JOURNAL(p_s_sb)->j_free_bitmap_nodes++ ;
  180     } else {
  181       break ; // this is ok, we'll try again when more are needed 
  182     }
  183   }
  184 }
  185 
  186 static int set_bit_in_list_bitmap(struct super_block *p_s_sb, int block,
  187                                   struct reiserfs_list_bitmap *jb) {
  188   int bmap_nr = block / (p_s_sb->s_blocksize << 3) ;
  189   int bit_nr = block % (p_s_sb->s_blocksize << 3) ;
  190 
  191   if (!jb->bitmaps[bmap_nr]) {
  192     jb->bitmaps[bmap_nr] = get_bitmap_node(p_s_sb) ;
  193   }
  194   set_bit(bit_nr, (unsigned long *)jb->bitmaps[bmap_nr]->data) ;
  195   return 0 ;
  196 }
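
A quick worked example of the mapping above: with a 4096-byte s_blocksize, each bitmap node covers s_blocksize << 3 = 32768 blocks, so block 40000 maps to bmap_nr = 40000 / 32768 = 1 and bit_nr = 40000 % 32768 = 7232.
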
  197 
  198 static void cleanup_bitmap_list(struct super_block *p_s_sb,
  199                                 struct reiserfs_list_bitmap *jb) {
  200   int i;
  201   for (i = 0 ; i < SB_BMAP_NR(p_s_sb) ; i++) {
  202     if (jb->bitmaps[i]) {
  203       free_bitmap_node(p_s_sb, jb->bitmaps[i]) ;
  204       jb->bitmaps[i] = NULL ;
  205     }
  206   }
  207 }
  208 
  209 /*
  210 ** only call this on FS unmount.
  211 */
  212 static int free_list_bitmaps(struct super_block *p_s_sb,
  213                              struct reiserfs_list_bitmap *jb_array) {
  214   int i ;
  215   struct reiserfs_list_bitmap *jb ;
  216   for (i = 0 ; i < JOURNAL_NUM_BITMAPS ; i++) {
  217     jb = jb_array + i ;
  218     jb->journal_list = NULL ;
  219     cleanup_bitmap_list(p_s_sb, jb) ;
  220     vfree(jb->bitmaps) ;
  221     jb->bitmaps = NULL ;
  222   }
  223   return 0;
  224 }
  225 
  226 static int free_bitmap_nodes(struct super_block *p_s_sb) {
  227   struct list_head *next = SB_JOURNAL(p_s_sb)->j_bitmap_nodes.next ;
  228   struct reiserfs_bitmap_node *bn ;
  229 
  230   while(next != &SB_JOURNAL(p_s_sb)->j_bitmap_nodes) {
  231     bn = list_entry(next, struct reiserfs_bitmap_node, list) ;
  232     list_del(next) ;
  233     reiserfs_kfree(bn->data, p_s_sb->s_blocksize, p_s_sb) ;
  234     reiserfs_kfree(bn, sizeof(struct reiserfs_bitmap_node), p_s_sb) ;
  235     next = SB_JOURNAL(p_s_sb)->j_bitmap_nodes.next ;
  236     SB_JOURNAL(p_s_sb)->j_free_bitmap_nodes-- ;
  237   }
  238 
  239   return 0 ;
  240 }
  241 
  242 /*
  243 ** get memory for JOURNAL_NUM_BITMAPS worth of bitmaps. 
  244 ** jb_array is the array to be filled in.
  245 */
  246 int reiserfs_allocate_list_bitmaps(struct super_block *p_s_sb,
  247                                    struct reiserfs_list_bitmap *jb_array,
  248                                    int bmap_nr) {
  249   int i ;
  250   int failed = 0 ;
  251   struct reiserfs_list_bitmap *jb ;
  252   int mem = bmap_nr * sizeof(struct reiserfs_bitmap_node *) ;
  253 
  254   for (i = 0 ; i < JOURNAL_NUM_BITMAPS ; i++) {
  255     jb = jb_array + i ;
  256     jb->journal_list = NULL ;
  257     jb->bitmaps = vmalloc( mem ) ;
  258     if (!jb->bitmaps) {
  259       reiserfs_warning(p_s_sb, "clm-2000, unable to allocate bitmaps for journal lists\n") ;
  260       failed = 1;   
  261       break ;
  262     }
  263     memset(jb->bitmaps, 0, mem) ;
  264   }
  265   if (failed) {
  266     free_list_bitmaps(p_s_sb, jb_array) ;
  267     return -1 ;
  268   }
  269   return 0 ;
  270 }
  271 
  272 /*
  273 ** find an available list bitmap.  If you can't find one, flush a commit list 
  274 ** and try again
  275 */
  276 static struct reiserfs_list_bitmap *
  277 get_list_bitmap(struct super_block *p_s_sb, struct reiserfs_journal_list *jl) {
  278   int i,j ; 
  279   struct reiserfs_list_bitmap *jb = NULL ;
  280 
  281   for (j = 0 ; j < (JOURNAL_NUM_BITMAPS * 3) ; j++) {
  282     i = SB_JOURNAL(p_s_sb)->j_list_bitmap_index ;
  283     SB_JOURNAL(p_s_sb)->j_list_bitmap_index = (i + 1) % JOURNAL_NUM_BITMAPS ;
  284     jb = SB_JOURNAL(p_s_sb)->j_list_bitmap + i ;
  285     if (SB_JOURNAL(p_s_sb)->j_list_bitmap[i].journal_list) {
  286       flush_commit_list(p_s_sb, SB_JOURNAL(p_s_sb)->j_list_bitmap[i].journal_list, 1) ;
  287       if (!SB_JOURNAL(p_s_sb)->j_list_bitmap[i].journal_list) {
  288         break ;
  289       }
  290     } else {
  291       break ;
  292     }
  293   }
  294   if (jb->journal_list) { /* double check to make sure it flushed correctly */
  295     return NULL ;
  296   }
  297   jb->journal_list = jl ;
  298   return jb ;
  299 }
  300 
  301 /* 
  302 ** allocates a new chunk of X nodes, and links them all together as a list.
  303 ** Uses the cnode->next and cnode->prev pointers
  304 ** returns NULL on failure
  305 */
  306 static struct reiserfs_journal_cnode *allocate_cnodes(int num_cnodes) {
  307   struct reiserfs_journal_cnode *head ;
  308   int i ;
  309   if (num_cnodes <= 0) {
  310     return NULL ;
  311   }
  312   head = vmalloc(num_cnodes * sizeof(struct reiserfs_journal_cnode)) ;
  313   if (!head) {
  314     return NULL ;
  315   }
  316   memset(head, 0, num_cnodes * sizeof(struct reiserfs_journal_cnode)) ;
  317   head[0].prev = NULL ;
  318   head[0].next = head + 1 ;
  319   for (i = 1 ; i < num_cnodes; i++) {
  320     head[i].prev = head + (i - 1) ;
  321     head[i].next = head + (i + 1) ; /* if last one, overwrite it after the if */
  322   }
  323   head[num_cnodes -1].next = NULL ;
  324   return head ;
  325 }
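
Note that although get_cnode() below hands these out one at a time, the chunk stays a single vmalloc'd array; free_journal_ram() keeps the original pointer in j_cnode_free_orig so it can vfree() the whole array in one shot at unmount.
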
  326 
  327 /*
  328 ** pulls a cnode off the free list, or returns NULL on failure 
  329 */
  330 static struct reiserfs_journal_cnode *get_cnode(struct super_block *p_s_sb) {
  331   struct reiserfs_journal_cnode *cn ;
  332 
  333   reiserfs_check_lock_depth("get_cnode") ;
  334 
  335   if (SB_JOURNAL(p_s_sb)->j_cnode_free <= 0) {
  336     return NULL ;
  337   }
  338   SB_JOURNAL(p_s_sb)->j_cnode_used++ ;
  339   SB_JOURNAL(p_s_sb)->j_cnode_free-- ;
  340   cn = SB_JOURNAL(p_s_sb)->j_cnode_free_list ;
  341   if (!cn) {
  342     return cn ;
  343   }
  344   if (cn->next) {
  345     cn->next->prev = NULL ;
  346   }
  347   SB_JOURNAL(p_s_sb)->j_cnode_free_list = cn->next ;
  348   memset(cn, 0, sizeof(struct reiserfs_journal_cnode)) ;
  349   return cn ;
  350 }
  351 
  352 /*
  353 ** returns a cnode to the free list 
  354 */
  355 static void free_cnode(struct super_block *p_s_sb, struct reiserfs_journal_cnode *cn) {
  356 
  357   reiserfs_check_lock_depth("free_cnode") ;
  358 
  359   SB_JOURNAL(p_s_sb)->j_cnode_used-- ;
  360   SB_JOURNAL(p_s_sb)->j_cnode_free++ ;
  361   /* memset(cn, 0, sizeof(struct reiserfs_journal_cnode)) ; */
  362   cn->next = SB_JOURNAL(p_s_sb)->j_cnode_free_list ;
  363   if (SB_JOURNAL(p_s_sb)->j_cnode_free_list) {
  364     SB_JOURNAL(p_s_sb)->j_cnode_free_list->prev = cn ;
  365   }
  366   cn->prev = NULL ; /* not needed with the memset, but I might kill the memset, and forget to do this */
  367   SB_JOURNAL(p_s_sb)->j_cnode_free_list = cn ;
  368 }
  369 
  370 static int clear_prepared_bits(struct buffer_head *bh) {
  371   clear_bit(BH_JPrepared, &bh->b_state) ;
  372   return 0 ;
  373 }
  374 
  375 /* buffer is in current transaction */
  376 inline int buffer_journaled(const struct buffer_head *bh) {
  377   if (bh)
  378     return test_bit(BH_JDirty, &((struct buffer_head *)bh)->b_state) ;
  379   else
  380     return 0 ;
  381 }
  382 
  383 /* disk block was taken off free list before being in a finished transaction, or written to disk
  384 ** journal_new blocks can be reused immediately, for any purpose
  385 */ 
  386 inline int buffer_journal_new(const struct buffer_head *bh) {
  387   if (bh) 
  388     return test_bit(BH_JNew, &((struct buffer_head *)bh)->b_state) ;
  389   else
  390     return 0 ;
  391 }
  392 
  393 inline int mark_buffer_journal_new(struct buffer_head *bh) {
  394   if (bh) {
  395     set_bit(BH_JNew, &bh->b_state) ;
  396   }
  397   return 0 ;
  398 }
  399 
  400 inline int mark_buffer_not_journaled(struct buffer_head *bh) {
  401   if (bh) 
  402     clear_bit(BH_JDirty, &bh->b_state) ;
  403   return 0 ;
  404 }
  405 
  406 /* utility function to force a BUG if it is called without the big
  407 ** kernel lock held.  caller is the string printed just before calling BUG()
  408 */
  409 void reiserfs_check_lock_depth(char *caller) {
  410 #ifdef CONFIG_SMP
  411   if (current->lock_depth < 0) {
  412     printk("%s called without kernel lock held\n", caller) ;
  413     show_reiserfs_locks() ;
  414     BUG() ;
  415   }
  416 #else
  417   ;
  418 #endif
  419 }
  420 
  421 /* return a cnode with same dev, block number and size in table, or null if not found */
  422 static inline struct reiserfs_journal_cnode *get_journal_hash_dev(struct reiserfs_journal_cnode **table,
  423                                                                   kdev_t dev,long bl,int size) {
  424   struct reiserfs_journal_cnode *cn ;
  425   cn = journal_hash(table, dev, bl) ;
  426   while(cn) {
  427     if ((cn->blocknr == bl) && (cn->dev == dev))
  428       return cn ;
  429     cn = cn->hnext ;
  430   }
  431   return (struct reiserfs_journal_cnode *)0 ;
  432 }
  433 
  434 /* returns a cnode with same size, block number and dev as bh in the current transaction hash.  NULL if not found */
  435 static inline struct reiserfs_journal_cnode *get_journal_hash(struct super_block *p_s_sb, struct buffer_head *bh) {
  436   struct reiserfs_journal_cnode *cn ;
  437   if (bh) {
  438     cn =  get_journal_hash_dev(SB_JOURNAL(p_s_sb)->j_hash_table, bh->b_dev, bh->b_blocknr, bh->b_size) ;
  439   }
  440   else {
  441     return (struct reiserfs_journal_cnode *)0 ;
  442   }
  443   return cn ;
  444 }
  445 
  446 /* once upon a time, the journal would deadlock.  a lot.  Now, when
  447 ** CONFIG_REISERFS_CHECK is defined, anytime someone enters a
  448 ** transaction, it pushes itself into this ugly static list, and pops
  449 ** itself off before calling journal_end.  I made a SysRq key to dump
  450 ** the list, and tell me what the writers are when I'm deadlocked.  */
  451 
  452                                 /* are you depending on the compiler
  453                                    to optimize this function away
  454                                    everywhere it is called? It is not
  455                                    obvious how this works, but I
  456                                    suppose debugging code need not be
  457                                    clear.  -Hans */
  458 static char *journal_writers[512] ;
  459 int push_journal_writer(char *s) {
  460 #ifdef CONFIG_REISERFS_CHECK
  461   int i ;
  462   for (i = 0 ; i < 512 ; i++) {
  463     if (!journal_writers[i]) {
  464       journal_writers[i] = s ;
  465       return i ;
  466     }
  467   }
  468   return -1 ;
  469 #else
  470   return 0 ;
  471 #endif
  472 }
  473 int pop_journal_writer(int index) {
  474 #ifdef CONFIG_REISERFS_CHECK
  475   if (index >= 0) {
  476     journal_writers[index] = NULL ;
  477   }
  478 #endif
  479   return 0 ;
  480 }
  481 
  482 int dump_journal_writers(void) {
  483   int i ;
  484   for (i = 0 ; i < 512 ; i++) {
  485     if (journal_writers[i]) {
  486       printk("%d: %s\n", i, journal_writers[i]) ;
  487     }
  488   }
  489   return 0 ;
  490 }
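
A hypothetical use of this debugging hook (the caller string is illustrative; push_journal_writer compiles to a no-op returning 0 when CONFIG_REISERFS_CHECK is off):

  int windex ;

  windex = push_journal_writer("my_caller") ; /* record that we entered a transaction */
  /* ... journal_begin / journal_mark_dirty / journal_end ... */
  pop_journal_writer(windex) ;                /* drop back off the writer list */
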
  491 
  492 /*
  493 ** this actually means 'can this block be reallocated yet?'.  If you set search_all, a block can only be allocated
  494 ** if it is not in the current transaction, was not freed by the current transaction, and has no chance of ever
  495 ** being overwritten by a replay after crashing.
  496 **
  497 ** If you don't set search_all, a block can only be allocated if it is not in the current transaction.  Since deleting
  498 ** a block removes it from the current transaction, this case should never happen.  If you don't set search_all, make
  499 ** sure you never write the block without logging it.
  500 **
  501 ** next_zero_bit is a suggestion about the next block to try for find_forward.
  502 ** when bl is rejected because it is set in a journal list bitmap, we search
  503 ** for the next zero bit in the bitmap that rejected bl.  Then, we return that
  504 ** through next_zero_bit for find_forward to try.
  505 **
  506 ** Just because we return something in next_zero_bit does not mean we won't
  507 ** reject it on the next call to reiserfs_in_journal
  508 **
  509 */
  510 int reiserfs_in_journal(struct super_block *p_s_sb, kdev_t dev, 
  511                         int bmap_nr, int bit_nr, int size, int search_all, 
  512                         unsigned int *next_zero_bit) {
  513   struct reiserfs_journal_cnode *cn ;
  514   struct reiserfs_list_bitmap *jb ;
  515   int i ;
  516   unsigned long bl;
  517 
  518   *next_zero_bit = 0 ; /* always start this at zero. */
  519 
  520   /* we aren't logging, all blocks are safe for reuse */
  521   if (reiserfs_dont_log(p_s_sb)) {
  522     return 0 ;
  523   }
  524 
  525   PROC_INFO_INC( p_s_sb, journal.in_journal );
  526   /* If we aren't doing a search_all, this is a metablock, and it will be logged before use.
  527   ** If we crash before the transaction that freed it commits, this transaction won't
  528   ** have committed either, and the block will never be written
  529   */
  530   if (search_all) {
  531     for (i = 0 ; i < JOURNAL_NUM_BITMAPS ; i++) {
  532       PROC_INFO_INC( p_s_sb, journal.in_journal_bitmap );
  533       jb = SB_JOURNAL(p_s_sb)->j_list_bitmap + i ;
  534       if (jb->journal_list && jb->bitmaps[bmap_nr] &&
  535           test_bit(bit_nr, (unsigned long *)jb->bitmaps[bmap_nr]->data)) {
  536         *next_zero_bit = find_next_zero_bit((unsigned long *)
  537                                      (jb->bitmaps[bmap_nr]->data),
  538                                      p_s_sb->s_blocksize << 3, bit_nr+1) ; 
  539         return 1 ;
  540       }
  541     }
  542   }
  543 
  544   bl = bmap_nr * (p_s_sb->s_blocksize << 3) + bit_nr;
  545   /* is it in any old transactions? */
  546   if (search_all && (cn = get_journal_hash_dev(SB_JOURNAL(p_s_sb)->j_list_hash_table, dev,bl,size))) {
  547     return 1; 
  548   }
  549 
  550   /* is it in the current transaction?  This should never happen */
  551   if ((cn = get_journal_hash_dev(SB_JOURNAL(p_s_sb)->j_hash_table, dev,bl,size))) {
  552     return 1; 
  553   }
  554 
  555   PROC_INFO_INC( p_s_sb, journal.in_journal_reusable );
  556   /* safe for reuse */
  557   return 0 ;
  558 }
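
The next_zero_bit hint is meant to be consumed by the block allocator, roughly as in this hypothetical loop (the real caller lives in bitmap.c; the variable names are illustrative):

  unsigned int hint ;
  int bit = start_bit ;

  while (reiserfs_in_journal(s, s->s_dev, bmap_nr, bit, s->s_blocksize, 1, &hint)) {
    /* bit was rejected.  If a list bitmap rejected it, hint is the next
    ** zero bit in that bitmap; otherwise fall back to scanning one bit ahead
    */
    bit = hint ? hint : bit + 1 ;
  }
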
  559 
  560 /* insert cn into table
  561 */
  562 inline void insert_journal_hash(struct reiserfs_journal_cnode **table, struct reiserfs_journal_cnode *cn) {
  563   struct reiserfs_journal_cnode *cn_orig ;
  564 
  565   cn_orig = journal_hash(table, cn->dev, cn->blocknr) ;
  566   cn->hnext = cn_orig ;
  567   cn->hprev = NULL ;
  568   if (cn_orig) {
  569     cn_orig->hprev = cn ;
  570   }
  571   journal_hash(table, cn->dev, cn->blocknr) =  cn ;
  572 }
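
For the final assignment above to compile, journal_hash() must expand to an lvalue; in reiserfs_fs.h it is a macro that indexes straight into the bucket array, along these lines:

  #define journal_hash(t,dev,block) ((t)[_jhashfn((dev),(block)) & JBH_HASH_MASK])
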
  573 
  574 /* lock the current transaction */
  575 inline static void lock_journal(struct super_block *p_s_sb) {
  576   PROC_INFO_INC( p_s_sb, journal.lock_journal );
  577   while(atomic_read(&(SB_JOURNAL(p_s_sb)->j_wlock)) > 0) {
  578     PROC_INFO_INC( p_s_sb, journal.lock_journal_wait );
  579     sleep_on(&(SB_JOURNAL(p_s_sb)->j_wait)) ;
  580   }
  581   atomic_set(&(SB_JOURNAL(p_s_sb)->j_wlock), 1) ;
  582 }
  583 
  584 /* unlock the current transaction */
  585 inline static void unlock_journal(struct super_block *p_s_sb) {
  586   atomic_dec(&(SB_JOURNAL(p_s_sb)->j_wlock)) ;
  587   wake_up(&(SB_JOURNAL(p_s_sb)->j_wait)) ;
  588 }
  589 
  590 /*
  591 ** this used to be much more involved, and I'm keeping it just in case things get ugly again.
  592 ** it gets called by flush_commit_list, and cleans up any data stored about blocks freed during a
  593 ** transaction.
  594 */
  595 static void cleanup_freed_for_journal_list(struct super_block *p_s_sb, struct reiserfs_journal_list *jl) {
  596 
  597   struct reiserfs_list_bitmap *jb = jl->j_list_bitmap ;
  598   if (jb) {
  599     cleanup_bitmap_list(p_s_sb, jb) ;
  600   }
  601   jl->j_list_bitmap->journal_list = NULL ;
  602   jl->j_list_bitmap = NULL ;
  603 }
  604 
  605 /*
  606 ** if this journal list still has commit blocks unflushed, send them to disk.
  607 **
  608 ** log areas must be flushed in order (transaction 2 can't commit before transaction 1)
  609 ** Before the commit block can be written, every other log block must be safely on disk
  610 **
  611 */
  612 static int flush_commit_list(struct super_block *s, struct reiserfs_journal_list *jl, int flushall) {
  613   int i, count ;
  614   int index = 0 ;
  615   int bn ;
  616   int retry_count = 0 ;
  617   int orig_commit_left = 0 ;
  618   struct buffer_head *tbh = NULL ;
  619   struct reiserfs_journal_list *other_jl ;
  620 
  621   reiserfs_check_lock_depth("flush_commit_list") ;
  622 
  623   if (atomic_read(&jl->j_older_commits_done)) {
  624     return 0 ;
  625   }
  626 
  627   /* before we can put our commit blocks on disk, we have to make sure everyone older than
  628   ** us is on disk too
  629   */
  630   if (jl->j_len <= 0) {
  631     return 0 ;
  632   }
  633   if (flushall) {
  634     /* we _must_ make sure the transactions are committed in order.  Start with the
  635     ** index after this one, wrap all the way around 
  636     */
  637     index = (jl - SB_JOURNAL_LIST(s)) + 1 ;
  638     for (i = 0 ; i < JOURNAL_LIST_COUNT ; i++) {
  639       other_jl = SB_JOURNAL_LIST(s) + ( (index + i) % JOURNAL_LIST_COUNT) ;
  640       if (other_jl && other_jl != jl && other_jl->j_len > 0 && other_jl->j_trans_id > 0 && 
  641           other_jl->j_trans_id <= jl->j_trans_id && (atomic_read(&(jl->j_older_commits_done)) == 0)) {
  642         flush_commit_list(s, other_jl, 0) ;
  643       }
  644     }
  645   }
  646 
  647   count = 0 ;
  648   /* don't flush the commit list for the current transaction */
  649   if (jl == ((SB_JOURNAL_LIST(s) + SB_JOURNAL_LIST_INDEX(s)))) {
  650     return 0 ;
  651   }
  652 
  653   /* make sure nobody is trying to flush this one at the same time */
  654   if (atomic_read(&(jl->j_commit_flushing))) {
  655     sleep_on(&(jl->j_commit_wait)) ;
  656     if (flushall) {
  657       atomic_set(&(jl->j_older_commits_done), 1) ;
  658     }
  659     return 0 ;
  660   }
  661   
  662   /* this commit is done, exit */
  663   if (atomic_read(&(jl->j_commit_left)) <= 0) {
  664     if (flushall) {
  665       atomic_set(&(jl->j_older_commits_done), 1) ;
  666     }
  667     return 0 ;
  668   }
  669   /* keeps others from flushing while we are flushing */
  670   atomic_set(&(jl->j_commit_flushing), 1) ; 
  671 
  672 
  673   if (jl->j_len > SB_JOURNAL_TRANS_MAX(s)) {
  674     reiserfs_panic(s, "journal-512: flush_commit_list: length is %lu, list number %d\n", jl->j_len, jl - SB_JOURNAL_LIST(s)) ;
  675     return 0 ;
  676   }
  677 
  678   orig_commit_left = atomic_read(&(jl->j_commit_left)) ; 
  679 
  680   /* start by checking all the commit blocks in this transaction.  
  681   ** Add anyone not on disk into tbh.  Stop checking once commit_left <= 1, because that means we
  682   ** only have the commit block left 
  683   */
  684 retry:
  685   count = 0 ;
  686   for (i = 0 ; atomic_read(&(jl->j_commit_left)) > 1 && i < (jl->j_len + 1) ; i++) {  /* everything but commit_bh */
  687     bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) + (jl->j_start+i) %  SB_ONDISK_JOURNAL_SIZE(s);
  688     tbh = journal_get_hash_table(s, bn) ;
  689 
   690     /* kill this sanity check */
   691     if (count > (orig_commit_left + 2)) {
   692       reiserfs_panic(s, "journal-539: flush_commit_list: BAD count(%d) > orig_commit_left(%d)!\n", count, orig_commit_left) ;
   693     }
  694     if (tbh) {
  695       if (buffer_locked(tbh)) { /* wait on it, redo it just to make sure */
  696         wait_on_buffer(tbh) ;
  697         if (!buffer_uptodate(tbh)) {
  698           reiserfs_panic(s, "journal-584, buffer write failed\n") ;
  699         }
  700       } 
  701       if (buffer_dirty(tbh)) {
  702         reiserfs_warning(s, "journal-569: flush_commit_list, block already dirty!\n") ;
  703       } else {                          
  704         mark_buffer_dirty(tbh) ;
  705       }
  706       ll_rw_block(WRITE, 1, &tbh) ;
  707       count++ ;
  708       put_bh(tbh) ; /* once for our get_hash */
  709     } 
  710   }
  711 
  712   /* wait on everyone in tbh before writing commit block */
  713   if (count > 0) {
  714     for (i = 0 ; atomic_read(&(jl->j_commit_left)) > 1 && 
  715                  i < (jl->j_len + 1) ; i++) {  /* everything but commit_bh */
  716       bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) + (jl->j_start + i) % SB_ONDISK_JOURNAL_SIZE(s) ;
  717       tbh = journal_get_hash_table(s, bn) ;
  718 
  719       wait_on_buffer(tbh) ;
  720       if (!buffer_uptodate(tbh)) {
  721         reiserfs_panic(s, "journal-601, buffer write failed\n") ;
  722       }
  723       put_bh(tbh) ; /* once for our get_hash */
  724       bforget(tbh) ;    /* once due to original getblk in do_journal_end */
  725       atomic_dec(&(jl->j_commit_left)) ;
  726     }
  727   }
  728 
  729   if (atomic_read(&(jl->j_commit_left)) != 1) { /* just the commit_bh left, flush it without calling getblk for everyone */
  730     if (retry_count < 2) {
  731       reiserfs_warning(s, "journal-582: flush_commit_list, not all log blocks on disk yet, trying again\n") ;
  732       retry_count++ ;
  733       goto retry;
  734     }
  735     reiserfs_panic(s, "journal-563: flush_commit_list: BAD, j_commit_left is %u, should be 1\n", 
  736                    atomic_read(&(jl->j_commit_left)));
  737   }
  738 
  739   mark_buffer_dirty(jl->j_commit_bh) ;
  740   ll_rw_block(WRITE, 1, &(jl->j_commit_bh)) ;
  741   wait_on_buffer(jl->j_commit_bh) ;
  742   if (!buffer_uptodate(jl->j_commit_bh)) {
  743     reiserfs_panic(s, "journal-615: buffer write failed\n") ;
  744   }
  745   atomic_dec(&(jl->j_commit_left)) ;
  746   bforget(jl->j_commit_bh) ;
  747 
  748   /* now, every commit block is on the disk.  It is safe to allow blocks freed during this transaction to be reallocated */
  749   cleanup_freed_for_journal_list(s, jl) ;
  750 
  751   if (flushall) {
  752     atomic_set(&(jl->j_older_commits_done), 1) ;
  753   }
  754   atomic_set(&(jl->j_commit_flushing), 0) ;
  755   wake_up(&(jl->j_commit_wait)) ;
  756 
  757   s->s_dirt = 1 ;
  758   return 0 ;
  759 }
  760 
  761 /*
  762 ** flush_journal_list frequently needs to find a newer transaction for a given block.  This does that, or 
  763 ** returns NULL if it can't find anything 
  764 */
  765 static struct reiserfs_journal_list *find_newer_jl_for_cn(struct reiserfs_journal_cnode *cn) {
  766   kdev_t dev = cn->dev;
  767   unsigned long blocknr = cn->blocknr ;
  768 
  769   cn = cn->hprev ;
  770   while(cn) {
  771     if (cn->dev == dev && cn->blocknr == blocknr && cn->jlist) {
  772       return cn->jlist ;
  773     }
  774     cn = cn->hprev ;
  775   }
  776   return NULL ;
  777 }
  778 
  779 
  780 /*
  781 ** once all the real blocks have been flushed, it is safe to remove them from the
  782 ** journal list for this transaction.  Aside from freeing the cnode, this also allows the
  783 ** block to be reallocated for data blocks if it had been deleted.
  784 */
  785 static void remove_all_from_journal_list(struct super_block *p_s_sb, struct reiserfs_journal_list *jl, int debug) {
  786   struct buffer_head fake_bh ;
  787   struct reiserfs_journal_cnode *cn, *last ;
  788   cn = jl->j_realblock ;
  789 
  790   /* which is better, to lock once around the whole loop, or
  791   ** to lock for each call to remove_from_journal_list?
  792   */
  793   while(cn) {
  794     if (cn->blocknr != 0) {
  795       if (debug) {
  796         reiserfs_warning(p_s_sb, "block %lu, bh is %d, state %ld\n", cn->blocknr, cn->bh ? 1: 0, 
  797                 cn->state) ;
  798       }
  799       fake_bh.b_blocknr = cn->blocknr ;
  800       fake_bh.b_dev = cn->dev ;
  801       cn->state = 0 ;
  802       remove_from_journal_list(p_s_sb, jl, &fake_bh, 1) ;
  803     }
  804     last = cn ;
  805     cn = cn->next ;
  806     free_cnode(p_s_sb, last) ;
  807   }
  808   jl->j_realblock = NULL ;
  809 }
  810 
  811 /*
  812 ** if this timestamp is greater than the timestamp we wrote last to the header block, write it to the header block.
  813 ** once this is done, I can safely say the log area for this transaction won't ever be replayed, and I can start
  814 ** releasing blocks in this transaction for reuse as data blocks.
  815 ** called by flush_journal_list, before it calls remove_all_from_journal_list
  816 **
  817 */
  818 static int _update_journal_header_block(struct super_block *p_s_sb, unsigned long offset, unsigned long trans_id) {
  819   struct reiserfs_journal_header *jh ;
  820   if (trans_id >= SB_JOURNAL(p_s_sb)->j_last_flush_trans_id) {
  821     if (buffer_locked((SB_JOURNAL(p_s_sb)->j_header_bh)))  {
  822       wait_on_buffer((SB_JOURNAL(p_s_sb)->j_header_bh)) ;
  823       if (!buffer_uptodate(SB_JOURNAL(p_s_sb)->j_header_bh)) {
  824         reiserfs_panic(p_s_sb, "journal-699: buffer write failed\n") ;
  825       }
  826     }
  827     SB_JOURNAL(p_s_sb)->j_last_flush_trans_id = trans_id ;
  828     SB_JOURNAL(p_s_sb)->j_first_unflushed_offset = offset ;
  829     jh = (struct reiserfs_journal_header *)(SB_JOURNAL(p_s_sb)->j_header_bh->b_data) ;
  830     jh->j_last_flush_trans_id = cpu_to_le32(trans_id) ;
  831     jh->j_first_unflushed_offset = cpu_to_le32(offset) ;
  832     jh->j_mount_id = cpu_to_le32(SB_JOURNAL(p_s_sb)->j_mount_id) ;
  833     set_bit(BH_Dirty, &(SB_JOURNAL(p_s_sb)->j_header_bh->b_state)) ;
  834     ll_rw_block(WRITE, 1, &(SB_JOURNAL(p_s_sb)->j_header_bh)) ;
  835     wait_on_buffer((SB_JOURNAL(p_s_sb)->j_header_bh)) ; 
  836     if (!buffer_uptodate(SB_JOURNAL(p_s_sb)->j_header_bh)) {
  837       reiserfs_warning( p_s_sb, "reiserfs: journal-837: IO error during journal replay\n" );
  838       return -EIO ;
  839     }
  840   }
  841   return 0 ;
  842 }
  843 
  844 static int update_journal_header_block(struct super_block *p_s_sb, 
  845                                        unsigned long offset, 
  846                                        unsigned long trans_id) {
  847     if (_update_journal_header_block(p_s_sb, offset, trans_id)) {
  848         reiserfs_panic(p_s_sb, "journal-712: buffer write failed\n") ;
  849     }
  850     return 0 ;
  851 }
  852 /* 
  853 ** flush any and all journal lists older than you are 
  854 ** can only be called from flush_journal_list
  855 */
  856 static int flush_older_journal_lists(struct super_block *p_s_sb, struct reiserfs_journal_list *jl, unsigned long trans_id) {
  857   int i, index ;
  858   struct reiserfs_journal_list *other_jl ;
  859 
  860   index = jl - SB_JOURNAL_LIST(p_s_sb) ;
  861   for (i = 0 ; i < JOURNAL_LIST_COUNT ; i++) {
  862     other_jl = SB_JOURNAL_LIST(p_s_sb) + ((index + i) % JOURNAL_LIST_COUNT) ;
  863     if (other_jl && other_jl->j_len > 0 && 
  864         other_jl->j_trans_id > 0 && 
  865         other_jl->j_trans_id < trans_id && 
  866         other_jl != jl) {
  867       /* do not flush all */
  868       flush_journal_list(p_s_sb, other_jl, 0) ; 
  869     }
  870   }
  871   return 0 ;
  872 }
  873 
  874 static void reiserfs_end_buffer_io_sync(struct buffer_head *bh, int uptodate) {
  875     if (buffer_journaled(bh)) {
  876         reiserfs_warning(NULL, "clm-2084: pinned buffer %lu:%s sent to disk\n",
  877                          bh->b_blocknr, kdevname(bh->b_dev)) ;
  878     }
  879     mark_buffer_uptodate(bh, uptodate) ;
  880     unlock_buffer(bh) ;
  881     put_bh(bh) ;
  882 }
  883 static void submit_logged_buffer(struct buffer_head *bh) {
  884     lock_buffer(bh) ;
  885     get_bh(bh) ;
  886     bh->b_end_io = reiserfs_end_buffer_io_sync ;
  887     mark_buffer_notjournal_new(bh) ;
  888     clear_bit(BH_Dirty, &bh->b_state) ;
  889     submit_bh(WRITE, bh) ;
  890 }
  891 
  892 /* flush a journal list, both commit and real blocks
  893 **
  894 ** always set flushall to 1, unless you are calling from inside
  895 ** flush_journal_list
  896 **
  897 ** IMPORTANT.  This can only be called while there are no journal writers, 
  898 ** and the journal is locked.  That means it can only be called from 
  899 ** do_journal_end, or by journal_release
  900 */
  901 static int flush_journal_list(struct super_block *s, 
  902                               struct reiserfs_journal_list *jl, int flushall) {
  903   struct reiserfs_journal_list *pjl ;
  904   struct reiserfs_journal_cnode *cn, *last ;
  905   int count ;
  906   int was_jwait = 0 ;
  907   int was_dirty = 0 ;
  908   struct buffer_head *saved_bh ; 
  909   unsigned long j_len_saved = jl->j_len ;
  910 
  911   if (j_len_saved <= 0) {
  912     return 0 ;
  913   }
  914 
  915   if (atomic_read(&SB_JOURNAL(s)->j_wcount) != 0) {
  916     reiserfs_warning(s, "clm-2048: flush_journal_list called with wcount %d\n",
  917                       atomic_read(&SB_JOURNAL(s)->j_wcount)) ;
  918   }
  919   /* if someone is getting the commit list, we must wait for them */
  920   while (atomic_read(&(jl->j_commit_flushing))) { 
  921     sleep_on(&(jl->j_commit_wait)) ;
  922   }
  923   /* if someone is flushing this list, we must wait for them */
  924   while (atomic_read(&(jl->j_flushing))) {
  925     sleep_on(&(jl->j_flush_wait)) ;
  926   }
  927 
  928   /* this list is now ours, we can change anything we want */
  929   atomic_set(&(jl->j_flushing), 1) ;
  930 
  931   count = 0 ;
  932   if (j_len_saved > SB_JOURNAL_TRANS_MAX(s)) {
  933     reiserfs_panic(s, "journal-715: flush_journal_list, length is %lu, list number %d\n", j_len_saved, jl - SB_JOURNAL_LIST(s)) ;
  934     atomic_dec(&(jl->j_flushing)) ;
  935     return 0 ;
  936   }
  937 
  938   /* if all the work is already done, get out of here */
  939   if (atomic_read(&(jl->j_nonzerolen)) <= 0 && 
  940       atomic_read(&(jl->j_commit_left)) <= 0) {
  941     goto flush_older_and_return ;
  942   } 
  943 
  944   /* start by putting the commit list on disk.  This will also flush 
  945   ** the commit lists of any older transactions
  946   */
  947   flush_commit_list(s, jl, 1) ;
  948 
  949   /* are we done now? */
  950   if (atomic_read(&(jl->j_nonzerolen)) <= 0 && 
  951       atomic_read(&(jl->j_commit_left)) <= 0) {
  952     goto flush_older_and_return ;
  953   }
  954 
  955   /* loop through each cnode, see if we need to write it, 
  956   ** or wait on a more recent transaction, or just ignore it 
  957   */
  958   if (atomic_read(&(SB_JOURNAL(s)->j_wcount)) != 0) {
  959     reiserfs_panic(s, "journal-844: panic journal list is flushing, wcount is not 0\n") ;
  960   }
  961   cn = jl->j_realblock ;
  962   while(cn) {
  963     was_jwait = 0 ;
  964     was_dirty = 0 ;
  965     saved_bh = NULL ;
  966     /* blocknr of 0 is no longer in the hash, ignore it */
  967     if (cn->blocknr == 0) {
  968       goto free_cnode ;
  969     }
  970     pjl = find_newer_jl_for_cn(cn) ;
  971     /* the order is important here.  We check pjl to make sure we
  972     ** don't clear BH_JDirty_wait if we aren't the one writing this
  973     ** block to disk
  974     */
  975     if (!pjl && cn->bh) {
  976       saved_bh = cn->bh ;
  977 
  978       /* we do this to make sure nobody releases the buffer while 
  979       ** we are working with it 
  980       */
  981       get_bh(saved_bh) ;
  982 
  983       if (buffer_journal_dirty(saved_bh)) {
  984         was_jwait = 1 ;
  985         mark_buffer_notjournal_dirty(saved_bh) ;
  986         /* undo the inc from journal_mark_dirty */
  987         put_bh(saved_bh) ;
  988       }
  989       if (can_dirty(cn)) {
  990         was_dirty = 1 ;
  991       }
  992     }
  993 
  994     /* if someone has this block in a newer transaction, just make
  995     ** sure they are committed, and don't try writing it to disk
  996     */
  997     if (pjl) {
  998       flush_commit_list(s, pjl, 1) ;
  999       goto free_cnode ;
 1000     }
 1001 
 1002     /* bh == NULL when the block got to disk on its own, OR, 
 1003     ** the block got freed in a future transaction 
 1004     */
 1005     if (saved_bh == NULL) {
 1006       goto free_cnode ;
 1007     }
 1008 
 1009     /* this should never happen.  kupdate_one_transaction has this list
 1010     ** locked while it works, so we should never see a buffer here that
 1011     ** is not marked JDirty_wait
 1012     */
 1013     if ((!was_jwait) && !buffer_locked(saved_bh)) {
 1014 reiserfs_warning(s, "journal-813: BAD! buffer %lu %cdirty %cjwait, not in a newer tranasction\n", saved_bh->b_blocknr,
 1015         was_dirty ? ' ' : '!', was_jwait ? ' ' : '!') ;
 1016     }
 1017     /* kupdate_one_transaction waits on the buffers it is writing, so we
 1018     ** should never see locked buffers here
 1019     */
 1020     if (buffer_locked(saved_bh)) {
 1021       reiserfs_warning(s, "clm-2083: locked buffer %lu in flush_journal_list\n", 
 1022               saved_bh->b_blocknr) ;
 1023       wait_on_buffer(saved_bh) ;
 1024       if (!buffer_uptodate(saved_bh)) {
 1025         reiserfs_panic(s, "journal-923: buffer write failed\n") ;
 1026       }
 1027     } 
 1028     if (was_dirty) { 
 1029       /* we inc again because saved_bh gets decremented at free_cnode */
 1030       get_bh(saved_bh) ;
 1031       set_bit(BLOCK_NEEDS_FLUSH, &cn->state) ;
 1032       submit_logged_buffer(saved_bh) ;
 1033       count++ ;
 1034     } else {
 1035       reiserfs_warning(s, "clm-2082: Unable to flush buffer %lu in flush_journal_list\n",
 1036               saved_bh->b_blocknr) ;
 1037     }
 1038 free_cnode:
 1039     last = cn ;
 1040     cn = cn->next ;
 1041     if (saved_bh) {
 1042       /* we incremented this to keep others from taking the buffer head away */
 1043       put_bh(saved_bh) ;
 1044       if (atomic_read(&(saved_bh->b_count)) < 0) {
 1045         reiserfs_warning(s, "journal-945: saved_bh->b_count < 0\n") ;
 1046       }
 1047     }
 1048   }
 1049   if (count > 0) {
 1050     cn = jl->j_realblock ;
 1051     while(cn) {
 1052       if (test_bit(BLOCK_NEEDS_FLUSH, &cn->state)) {
 1053         if (!cn->bh) {
 1054           reiserfs_panic(s, "journal-1011: cn->bh is NULL\n") ;
 1055         }
 1056         wait_on_buffer(cn->bh) ;
 1057         if (!cn->bh) {
 1058           reiserfs_panic(s, "journal-1012: cn->bh is NULL\n") ;
 1059         }
 1060         if (!buffer_uptodate(cn->bh)) {
 1061           reiserfs_panic(s, "journal-949: buffer write failed\n") ;
 1062         }
 1063         refile_buffer(cn->bh) ;
 1064         brelse(cn->bh) ;
 1065       }
 1066       cn = cn->next ;
 1067     }
 1068   }
 1069 
 1070 flush_older_and_return:
 1071   /* before we can update the journal header block, we _must_ flush all 
 1072   ** real blocks from all older transactions to disk.  This is because
 1073   ** once the header block is updated, this transaction will not be
 1074   ** replayed after a crash
 1075   */
 1076   if (flushall) {
 1077     flush_older_journal_lists(s, jl, jl->j_trans_id) ;
 1078   } 
 1079   
 1080   /* before we can remove everything from the hash tables for this 
 1081   ** transaction, we must make sure it can never be replayed
 1082   **
 1083   ** since we are only called from do_journal_end, we know for sure there
 1084   ** are no allocations going on while we are flushing journal lists.  So,
 1085   ** we only need to update the journal header block for the last list
 1086   ** being flushed
 1087   */
 1088   if (flushall) {
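    /* j_len counts only the logged real blocks; the + 2 steps past the
    ** description and commit blocks that bracket them on disk
    */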
 1089     update_journal_header_block(s, (jl->j_start + jl->j_len + 2) % SB_ONDISK_JOURNAL_SIZE(s), jl->j_trans_id) ;
 1090   }
 1091   remove_all_from_journal_list(s, jl, 0) ;
 1092   jl->j_len = 0 ;
 1093   atomic_set(&(jl->j_nonzerolen), 0) ;
 1094   jl->j_start = 0 ;
 1095   jl->j_realblock = NULL ;
 1096   jl->j_commit_bh = NULL ;
 1097   jl->j_trans_id = 0 ;
 1098   atomic_dec(&(jl->j_flushing)) ;
 1099   wake_up(&(jl->j_flush_wait)) ;
 1100   return 0 ;
 1101 } 
 1102 
 1103 
 1104 static int kupdate_one_transaction(struct super_block *s,
 1105                                     struct reiserfs_journal_list *jl) 
 1106 {
 1107     struct reiserfs_journal_list *pjl ; /* previous list for this cn */
 1108     struct reiserfs_journal_cnode *cn, *walk_cn ;
 1109     unsigned long blocknr ;
 1110     int run = 0 ;
 1111     int orig_trans_id = jl->j_trans_id ;
 1112     struct buffer_head *saved_bh ; 
 1113     int ret = 0 ;
 1114 
 1115     /* if someone is getting the commit list, we must wait for them */
 1116     while (atomic_read(&(jl->j_commit_flushing))) {
 1117         sleep_on(&(jl->j_commit_wait)) ;
 1118     }
 1119     /* if someone is flushing this list, we must wait for them */
 1120     while (atomic_read(&(jl->j_flushing))) {
 1121         sleep_on(&(jl->j_flush_wait)) ;
 1122     }
 1123     /* was it flushed while we slept? */
 1124     if (jl->j_len <= 0 || jl->j_trans_id != orig_trans_id) {
 1125         return 0 ;
 1126     }
 1127 
 1128     /* this list is now ours, we can change anything we want */
 1129     atomic_set(&(jl->j_flushing), 1) ;
 1130 
 1131 loop_start:
 1132     cn = jl->j_realblock ;
 1133     while(cn) {
 1134         saved_bh = NULL ;
 1135         /* if the blocknr == 0, this has been cleared from the hash,
 1136         ** skip it
 1137         */
 1138         if (cn->blocknr == 0) {
 1139             goto next ;
 1140         }
 1141         /* look for a more recent transaction that logged this
 1142         ** buffer.  Only the most recent transaction with a buffer in
 1143         ** it is allowed to send that buffer to disk
 1144         */
 1145         pjl = find_newer_jl_for_cn(cn) ;
 1146         if (run == 0 && !pjl && cn->bh && buffer_journal_dirty(cn->bh) &&
 1147             can_dirty(cn)) 
 1148         {
 1149             if (!test_bit(BH_JPrepared, &cn->bh->b_state)) {
 1150                 set_bit(BLOCK_NEEDS_FLUSH, &cn->state) ;
 1151                 submit_logged_buffer(cn->bh) ;
 1152             } else {
 1153                 /* someone else is using this buffer.  We can't 
 1154                 ** send it to disk right now because they might
 1155                 ** be changing/logging it.
 1156                 */
 1157                 ret = 1 ;
 1158             }
 1159         } else if (test_bit(BLOCK_NEEDS_FLUSH, &cn->state)) {
 1160             clear_bit(BLOCK_NEEDS_FLUSH, &cn->state) ;
 1161             if (!pjl && cn->bh) {
 1162                 wait_on_buffer(cn->bh) ;
 1163             }
 1164             /* check again, someone could have logged while we scheduled */
 1165             pjl = find_newer_jl_for_cn(cn) ;
 1166 
 1167             /* before the JDirty_wait bit is set, the 
 1168             ** buffer is added to the hash list.  So, if we are
 1169             ** run in the middle of a do_journal_end, we will notice
 1170             ** if this buffer was logged and added from the latest
 1171             ** transaction.  In this case, we don't want to decrement
 1172             ** b_count
 1173             */
 1174             if (!pjl && cn->bh && buffer_journal_dirty(cn->bh)) {
 1175                 blocknr = cn->blocknr ;
 1176                 walk_cn = cn ;
 1177                 saved_bh= cn->bh ;
 1178                 /* update all older transactions to show this block
 1179                 ** was flushed
 1180                 */
 1181                 mark_buffer_notjournal_dirty(cn->bh) ;
 1182                 while(walk_cn) {
 1183                     if (walk_cn->bh && walk_cn->blocknr == blocknr && 
 1184                          walk_cn->dev == cn->dev) {
 1185                         if (walk_cn->jlist) {
 1186                             atomic_dec(&(walk_cn->jlist->j_nonzerolen)) ;
 1187                         }
 1188                         walk_cn->bh = NULL ;
 1189                     }
 1190                     walk_cn = walk_cn->hnext ;
 1191                 }
 1192                 if (atomic_read(&saved_bh->b_count) < 1) {
 1193                     reiserfs_warning(s, "clm-2081: bad count on %lu\n", 
 1194                                       saved_bh->b_blocknr) ;
 1195                 }
 1196                 brelse(saved_bh) ;
 1197             }
 1198         }
 1199         /*
 1200         ** if the more recent transaction is committed to the log,
 1201         ** this buffer can be considered flushed.  Decrement our
 1202         ** counters to reflect one less buffer that needs writing.
 1203         **
 1204         ** note, this relies on all of the above code being
 1205         ** schedule free once pjl comes back non-null.
 1206         */
 1207         if (pjl && cn->bh && atomic_read(&pjl->j_commit_left) == 0) {
 1208             atomic_dec(&cn->jlist->j_nonzerolen) ;
 1209             cn->bh = NULL ;
 1210         } 
 1211 next:
 1212         cn = cn->next ;
 1213     }
 1214     /* the first run through the loop sends all the dirty buffers to
 1215     ** ll_rw_block.
 1216     ** the second run through the loop does all the accounting
 1217     */
 1218     if (run++ == 0) {
 1219         goto loop_start ;
 1220     }
 1221 
 1222     atomic_set(&(jl->j_flushing), 0) ;
 1223     wake_up(&(jl->j_flush_wait)) ;
 1224     return ret ;
 1225 }
 1226 /* since we never give dirty buffers to bdflush/kupdate, we have to
 1227 ** flush them ourselves.  This runs through the journal lists, finds
 1228 ** old metadata in need of flushing and sends it to disk.
 1229 ** this does not end transactions, commit anything, or free
 1230 ** cnodes.
 1231 **
 1232 ** returns the highest transaction id that was flushed last time
 1233 */
 1234 static unsigned long reiserfs_journal_kupdate(struct super_block *s) {
 1235     struct reiserfs_journal_list *jl ;
 1236     int i ;
 1237     int start ;
 1238     time_t age ;
 1239     int ret = 0 ;
 1240 
 1241     start = SB_JOURNAL_LIST_INDEX(s) ;
 1242 
 1243     /* safety check to prevent flush attempts during a mount */
 1244     if (start < 0) {
 1245         return 0 ;
 1246     }
 1247     i = (start + 1) % JOURNAL_LIST_COUNT ;
 1248     while(i != start) {
 1249         jl = SB_JOURNAL_LIST(s) + i  ;
 1250         age = CURRENT_TIME - jl->j_timestamp ;
 1251         if (jl->j_len > 0 && // age >= (JOURNAL_MAX_COMMIT_AGE * 2) && 
 1252             atomic_read(&(jl->j_nonzerolen)) > 0 &&
 1253             atomic_read(&(jl->j_commit_left)) == 0) {
 1254 
 1255             if (jl->j_trans_id == SB_JOURNAL(s)->j_trans_id) {
 1256                 break ;
 1257             }
 1258             /* if ret was already 1, we want to preserve that */
 1259             ret |= kupdate_one_transaction(s, jl) ;
 1260         } 
 1261         if (atomic_read(&(jl->j_nonzerolen)) > 0) {
 1262             ret |= 1 ;
 1263         }
 1264         i = (i + 1) % JOURNAL_LIST_COUNT ;
 1265     }
 1266     return ret ;
 1267 }
 1268 
 1269 /*
 1270 ** removes any nodes in table with the same block and dev as bh.
 1271 ** only touches the hnext and hprev pointers.
 1272 */
 1273 void remove_journal_hash(struct reiserfs_journal_cnode **table, struct reiserfs_journal_list *jl,struct buffer_head *bh,
 1274                          int remove_freed){
 1275   struct reiserfs_journal_cnode *cur ;
 1276   struct reiserfs_journal_cnode **head ;
 1277 
 1278   if (!bh)
 1279     return ;
 1280 
 1281   head= &(journal_hash(table, bh->b_dev, bh->b_blocknr)) ;
 1282   if (!head) {
 1283     return ;
 1284   }
 1285   cur = *head ;
 1286   while(cur) {
 1287     if (cur->blocknr == bh->b_blocknr && cur->dev == bh->b_dev && (jl == NULL || jl == cur->jlist) && 
 1288         (!test_bit(BLOCK_FREED, &cur->state) || remove_freed)) {
 1289       if (cur->hnext) {
 1290         cur->hnext->hprev = cur->hprev ;
 1291       }
 1292       if (cur->hprev) {
 1293         cur->hprev->hnext = cur->hnext ;
 1294       } else {
 1295         *head = cur->hnext ;
 1296       }
 1297       cur->blocknr = 0 ;
 1298       cur->dev = 0 ;
 1299       cur->state = 0 ;
 1300       if (cur->bh && cur->jlist) /* anybody who clears the cur->bh will also dec the nonzerolen */
 1301         atomic_dec(&(cur->jlist->j_nonzerolen)) ;
 1302       cur->bh = NULL ;
 1303       cur->jlist = NULL ;
 1304     } 
 1305     cur = cur->hnext ;
 1306   }
 1307 }
 1308 
 1309 static void free_journal_ram(struct super_block *p_s_sb) {
 1310   vfree(SB_JOURNAL(p_s_sb)->j_cnode_free_orig) ;
 1311   free_list_bitmaps(p_s_sb, SB_JOURNAL(p_s_sb)->j_list_bitmap) ;
 1312   free_bitmap_nodes(p_s_sb) ; /* must be after free_list_bitmaps */
 1313   if (SB_JOURNAL(p_s_sb)->j_header_bh) {
 1314     brelse(SB_JOURNAL(p_s_sb)->j_header_bh) ;
 1315   }
 1316   /* j_header_bh is on the journal dev, make sure not to release the journal
 1317    * dev until we brelse j_header_bh
 1318    */
 1319   release_journal_dev(p_s_sb, SB_JOURNAL(p_s_sb));
 1320   vfree(SB_JOURNAL(p_s_sb)) ;
 1321 }
 1322 
 1323 /*
 1324 ** call on unmount.  Only set error to 1 if you haven't made your way out
 1325 ** of read_super() yet.  Any other caller must keep error at 0.
 1326 */
 1327 static int do_journal_release(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, int error) {
 1328   struct reiserfs_transaction_handle myth ;
 1329 
 1330   /* we only want to flush out transactions if we were called with error == 0
 1331   */
 1332   if (!error && !(p_s_sb->s_flags & MS_RDONLY)) {
 1333     /* end the current trans */
 1334     do_journal_end(th, p_s_sb,10, FLUSH_ALL) ;
 1335 
 1336     /* make sure something gets logged to force our way into the flush code */
 1337     journal_join(&myth, p_s_sb, 1) ;
 1338     reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb), 1) ;
 1339     journal_mark_dirty(&myth, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb)) ;
 1340     do_journal_end(&myth, p_s_sb,1, FLUSH_ALL) ;
 1341   }
 1342 
 1343   /* we decrement before we wake up, because the commit thread dies off
 1344   ** when it has been woken up and the count is <= 0
 1345   */
 1346   reiserfs_mounted_fs_count-- ;
 1347   wake_up(&reiserfs_commit_thread_wait) ;
 1348   sleep_on(&reiserfs_commit_thread_done) ;
 1349 
 1350   free_journal_ram(p_s_sb) ;
 1351 
 1352   return 0 ;
 1353 }
 1354 
 1355 /*
 1356 ** call on unmount.  flush all journal trans, release all alloc'd ram
 1357 */
 1358 int journal_release(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb) {
 1359   return do_journal_release(th, p_s_sb, 0) ;
 1360 }
 1361 /*
 1362 ** only call from an error condition inside reiserfs_read_super!
 1363 */
 1364 int journal_release_error(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb) {
 1365   return do_journal_release(th, p_s_sb, 1) ;
 1366 }
 1367 
 1368 /* compares description block with commit block.  returns 1 if they differ, 0 if they are the same */
 1369 static int journal_compare_desc_commit(struct super_block *p_s_sb, struct reiserfs_journal_desc *desc, 
 1370                                        struct reiserfs_journal_commit *commit) {
 1371   if (le32_to_cpu(commit->j_trans_id) != le32_to_cpu(desc->j_trans_id) || 
 1372       le32_to_cpu(commit->j_len) != le32_to_cpu(desc->j_len) || 
 1373       le32_to_cpu(commit->j_len) > SB_JOURNAL_TRANS_MAX(p_s_sb) || 
 1374       le32_to_cpu(commit->j_len) <= 0 
 1375   ) {
 1376     return 1 ;
 1377   }
 1378   return 0 ;
 1379 }
 1380 /* returns 0 if it did not find a description block  
 1381 ** returns -1 if it found a corrupt commit block
 1382 ** returns 1 if both desc and commit were valid 
 1383 */
 1384 static int journal_transaction_is_valid(struct super_block *p_s_sb, struct buffer_head *d_bh, unsigned long *oldest_invalid_trans_id, unsigned long *newest_mount_id) {
 1385   struct reiserfs_journal_desc *desc ;
 1386   struct reiserfs_journal_commit *commit ;
 1387   struct buffer_head *c_bh ;
 1388   unsigned long offset ;
 1389 
 1390   if (!d_bh)
 1391       return 0 ;
 1392 
 1393   desc = (struct reiserfs_journal_desc *)d_bh->b_data ;
 1394   if (le32_to_cpu(desc->j_len) > 0 && !memcmp(desc->j_magic, JOURNAL_DESC_MAGIC, 8)) {
 1395     if (oldest_invalid_trans_id && *oldest_invalid_trans_id && le32_to_cpu(desc->j_trans_id) > *oldest_invalid_trans_id) {
 1396       reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-986: transaction "
 1397                       "is valid returning because trans_id %d is greater than "
 1398                       "oldest_invalid %lu\n", le32_to_cpu(desc->j_trans_id), 
 1399                        *oldest_invalid_trans_id);
 1400       return 0 ;
 1401     }
 1402     if (newest_mount_id && *newest_mount_id > le32_to_cpu(desc->j_mount_id)) {
 1403       reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1087: transaction "
 1404                      "is valid returning because mount_id %d is less than "
 1405                      "newest_mount_id %lu\n", desc->j_mount_id, 
 1406                      *newest_mount_id) ;
 1407       return -1 ;
 1408     }
 1409     if ( le32_to_cpu(desc->j_len) > SB_JOURNAL_TRANS_MAX(p_s_sb) ) {
 1410       reiserfs_warning(p_s_sb, "journal-2018: Bad transaction length %d encountered, ignoring transaction\n", le32_to_cpu(desc->j_len));
 1411       return -1 ;
 1412     }
 1413     offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) ;
 1414 
 1415     /* ok, we have a journal description block, lets see if the transaction was valid */
 1416     c_bh = journal_bread(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
 1417                  ((offset + le32_to_cpu(desc->j_len) + 1) % SB_ONDISK_JOURNAL_SIZE(p_s_sb))) ;
 1418     if (!c_bh)
 1419       return 0 ;
 1420     commit = (struct reiserfs_journal_commit *)c_bh->b_data ;
 1421     if (journal_compare_desc_commit(p_s_sb, desc, commit)) {
 1422       reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, 
 1423                      "journal_transaction_is_valid, commit offset %ld had bad "
 1424                      "time %d or length %d\n", 
 1425                      c_bh->b_blocknr -  SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
 1426                      le32_to_cpu(commit->j_trans_id), 
 1427                      le32_to_cpu(commit->j_len));
 1428       brelse(c_bh) ;
 1429       if (oldest_invalid_trans_id) {
 1430         *oldest_invalid_trans_id = le32_to_cpu(desc->j_trans_id) ;
 1431         reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1004: "
 1432                        "transaction_is_valid setting oldest invalid trans_id "
 1433                        "to %d\n", le32_to_cpu(desc->j_trans_id)) ;
 1434         }
 1435       return -1; 
 1436     }
 1437     brelse(c_bh) ;
 1438     reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1006: found valid "
 1439                    "transaction start offset %lu, len %d id %d\n", 
 1440                    d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb), 
 1441                    le32_to_cpu(desc->j_len), le32_to_cpu(desc->j_trans_id)) ;
 1442     return 1 ;
 1443   } else {
 1444     return 0 ;
 1445   }
 1446 }
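/* Illustrative sketch of the wraparound arithmetic the function above
** relies on.  A transaction occupies j_len + 2 journal blocks laid out
** as [desc][data_0 .. data_(j_len-1)][commit], modulo the on-disk
** journal size, so the commit block for a desc at `offset' is found as
** below.  Worked example (assumed sizes): journal size 8192, offset
** 8190, j_len 3  ->  commit at (8190 + 3 + 1) % 8192 = 2, i.e. wrapped.
*/
#if 0   /* example only */
static unsigned long example_commit_offset(unsigned long offset,
                                           unsigned long j_len,
                                           unsigned long journal_size)
{
    return (offset + j_len + 1) % journal_size ;
}
#endif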
 1447 
 1448 static void brelse_array(struct buffer_head **heads, int num) {
 1449   int i ;
 1450   for (i = 0 ; i < num ; i++) {
 1451     brelse(heads[i]) ;
 1452   }
 1453 }
 1454 
 1455 /*
 1456 ** given the start, and values for the oldest acceptable transactions,
 1457 ** this either reads in and replays a transaction, or returns because the transaction
 1458 ** is invalid, or too old.
 1459 */
 1460 static int journal_read_transaction(struct super_block *p_s_sb, unsigned long cur_dblock, unsigned long oldest_start, 
 1461                                     unsigned long oldest_trans_id, unsigned long newest_mount_id) {
 1462   struct reiserfs_journal_desc *desc ;
 1463   struct reiserfs_journal_commit *commit ;
 1464   unsigned long trans_id = 0 ;
 1465   struct buffer_head *c_bh ;
 1466   struct buffer_head *d_bh ;
 1467   struct buffer_head **log_blocks = NULL ;
 1468   struct buffer_head **real_blocks = NULL ;
 1469   unsigned long trans_offset ;
 1470   int i;
 1471 
 1472   d_bh = journal_bread(p_s_sb, cur_dblock) ;
 1473   if (!d_bh)
 1474     return 1 ;
 1475   desc = (struct reiserfs_journal_desc *)d_bh->b_data ;
 1476   trans_offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) ;
 1477   reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1037: "
 1478                  "journal_read_transaction, offset %lu, len %d mount_id %d\n", 
 1479                  d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb), 
 1480                  le32_to_cpu(desc->j_len), le32_to_cpu(desc->j_mount_id)) ;
 1481   if (le32_to_cpu(desc->j_trans_id) < oldest_trans_id) {
 1482     reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1039: "
 1483                    "journal_read_trans skipping because %lu is too old\n", 
 1484                    cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb)) ;
 1485     brelse(d_bh) ;
 1486     return 1 ;
 1487   }
 1488   if (le32_to_cpu(desc->j_mount_id) != newest_mount_id) {
 1489     reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1146: "
 1490                    "journal_read_trans skipping because %d is != "
 1491                    "newest_mount_id %lu\n", le32_to_cpu(desc->j_mount_id), 
 1492                     newest_mount_id) ;
 1493     brelse(d_bh) ;
 1494     return 1 ;
 1495   }
 1496   c_bh = journal_bread(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
 1497                 ((trans_offset + le32_to_cpu(desc->j_len) + 1) % 
 1498                  SB_ONDISK_JOURNAL_SIZE(p_s_sb))) ;
 1499   if (!c_bh) {
 1500     brelse(d_bh) ;
 1501     return 1 ;
 1502   }
 1503   commit = (struct reiserfs_journal_commit *)c_bh->b_data ;
 1504   if (journal_compare_desc_commit(p_s_sb, desc, commit)) {
 1505     reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal_read_transaction, "
 1506                    "commit offset %ld had bad time %d or length %d\n", 
 1507                    c_bh->b_blocknr -  SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb), 
 1508                    le32_to_cpu(commit->j_trans_id), le32_to_cpu(commit->j_len));
 1509     brelse(c_bh) ;
 1510     brelse(d_bh) ;
 1511     return 1; 
 1512   }
 1513   trans_id = le32_to_cpu(desc->j_trans_id) ;
 1514   /* now we know we've got a good transaction, and it was inside the valid time ranges */
 1515   log_blocks = reiserfs_kmalloc(le32_to_cpu(desc->j_len) * sizeof(struct buffer_head *), GFP_NOFS, p_s_sb) ;
 1516   real_blocks = reiserfs_kmalloc(le32_to_cpu(desc->j_len) * sizeof(struct buffer_head *), GFP_NOFS, p_s_sb) ;
 1517   if (!log_blocks  || !real_blocks) {
 1518     brelse(c_bh) ;
 1519     brelse(d_bh) ;
 1520     reiserfs_kfree(log_blocks, le32_to_cpu(desc->j_len) * sizeof(struct buffer_head *), p_s_sb) ;
 1521     reiserfs_kfree(real_blocks, le32_to_cpu(desc->j_len) * sizeof(struct buffer_head *), p_s_sb) ;
 1522     reiserfs_warning(p_s_sb, "journal-1169: kmalloc failed, unable to mount FS\n") ;
 1523     return -1 ;
 1524   }
 1525   /* get all the buffer heads */
 1526   for(i = 0 ; i < le32_to_cpu(desc->j_len) ; i++) {
 1527     log_blocks[i] = journal_getblk(p_s_sb,  SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + (trans_offset + 1 + i) % SB_ONDISK_JOURNAL_SIZE(p_s_sb));
 1528     if (i < JOURNAL_TRANS_HALF) {
 1529       real_blocks[i] = sb_getblk(p_s_sb, le32_to_cpu(desc->j_realblock[i])) ;
 1530     } else {
 1531       real_blocks[i] = sb_getblk(p_s_sb, le32_to_cpu(commit->j_realblock[i - JOURNAL_TRANS_HALF])) ;
 1532     }
 1533     if ( real_blocks[i]->b_blocknr > SB_BLOCK_COUNT(p_s_sb) ) {
 1534       reiserfs_warning(p_s_sb, "journal-1207: REPLAY FAILURE fsck required! Block to replay is outside of filesystem\n");
 1535       goto abort_replay;
 1536     }
 1537     /* make sure we don't try to replay onto log or reserved area */
 1538     if (is_block_in_log_or_reserved_area(p_s_sb, real_blocks[i]->b_blocknr)) {
 1539       reiserfs_warning(p_s_sb, "journal-1204: REPLAY FAILURE fsck required! Trying to replay onto a log block\n") ;
 1540 abort_replay:
 1541       brelse_array(log_blocks, i) ;
 1542       brelse_array(real_blocks, i) ;
 1543       brelse(c_bh) ;
 1544       brelse(d_bh) ;
 1545       reiserfs_kfree(log_blocks, le32_to_cpu(desc->j_len) * sizeof(struct buffer_head *), p_s_sb) ;
 1546       reiserfs_kfree(real_blocks, le32_to_cpu(desc->j_len) * sizeof(struct buffer_head *), p_s_sb) ;
 1547       return -1 ;
 1548     }
 1549   }
 1550   /* read in the log blocks, memcpy to the corresponding real block */
 1551   ll_rw_block(READ, le32_to_cpu(desc->j_len), log_blocks) ;
 1552   for (i = 0 ; i < le32_to_cpu(desc->j_len) ; i++) {
 1553     wait_on_buffer(log_blocks[i]) ;
 1554     if (!buffer_uptodate(log_blocks[i])) {
 1555       reiserfs_warning(p_s_sb, "journal-1212: REPLAY FAILURE fsck required! buffer write failed\n") ;
 1556       brelse_array(log_blocks + i, le32_to_cpu(desc->j_len) - i) ;
 1557       brelse_array(real_blocks, le32_to_cpu(desc->j_len)) ;
 1558       brelse(c_bh) ;
 1559       brelse(d_bh) ;
 1560       reiserfs_kfree(log_blocks, le32_to_cpu(desc->j_len) * sizeof(struct buffer_head *), p_s_sb) ;
 1561       reiserfs_kfree(real_blocks, le32_to_cpu(desc->j_len) * sizeof(struct buffer_head *), p_s_sb) ;
 1562       return -1 ;
 1563     }
 1564     memcpy(real_blocks[i]->b_data, log_blocks[i]->b_data, real_blocks[i]->b_size) ;
 1565     mark_buffer_uptodate(real_blocks[i], 1) ;
 1566     brelse(log_blocks[i]) ;
 1567   }
 1568   /* flush out the real blocks */
 1569   for (i = 0 ; i < le32_to_cpu(desc->j_len) ; i++) {
 1570     set_bit(BH_Dirty, &(real_blocks[i]->b_state)) ;
 1571     ll_rw_block(WRITE, 1, real_blocks + i) ;
 1572   }
 1573   for (i = 0 ; i < le32_to_cpu(desc->j_len) ; i++) {
 1574     wait_on_buffer(real_blocks[i]) ; 
 1575     if (!buffer_uptodate(real_blocks[i])) {
 1576       reiserfs_warning(p_s_sb, "journal-1226: REPLAY FAILURE, fsck required! buffer write failed\n") ;
 1577       brelse_array(real_blocks + i, le32_to_cpu(desc->j_len) - i) ;
 1578       brelse(c_bh) ;
 1579       brelse(d_bh) ;
 1580       reiserfs_kfree(log_blocks, le32_to_cpu(desc->j_len) * sizeof(struct buffer_head *), p_s_sb) ;
 1581       reiserfs_kfree(real_blocks, le32_to_cpu(desc->j_len) * sizeof(struct buffer_head *), p_s_sb) ;
 1582       return -1 ;
 1583     }
 1584     brelse(real_blocks[i]) ;
 1585   }
 1586   cur_dblock =  SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + ((trans_offset + le32_to_cpu(desc->j_len) + 2) % SB_ONDISK_JOURNAL_SIZE(p_s_sb)) ;
 1587   reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1095: setting journal "
 1588                  "start to offset %ld\n", 
 1589                  cur_dblock -  SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb)) ;
 1590   
 1591   /* init starting values for the first transaction, in case this is the last transaction to be replayed. */
 1592   SB_JOURNAL(p_s_sb)->j_start = cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) ;
 1593   SB_JOURNAL(p_s_sb)->j_last_flush_trans_id = trans_id ;
 1594   SB_JOURNAL(p_s_sb)->j_trans_id = trans_id + 1;
 1595   brelse(c_bh) ;
 1596   brelse(d_bh) ;
 1597   reiserfs_kfree(log_blocks, le32_to_cpu(desc->j_len) * sizeof(struct buffer_head *), p_s_sb) ;
 1598   reiserfs_kfree(real_blocks, le32_to_cpu(desc->j_len) * sizeof(struct buffer_head *), p_s_sb) ;
 1599   return 0 ;
 1600 }
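/* Illustrative sketch: where journal_read_transaction finds the "real"
** (home) block numbers.  The array of targets is split between the desc
** and commit blocks -- the first JOURNAL_TRANS_HALF entries live in
** desc->j_realblock, the remainder in commit->j_realblock, exactly as
** the getblk loop above indexes them.
*/
#if 0   /* example only */
static __u32 example_real_blocknr(struct reiserfs_journal_desc *desc,
                                  struct reiserfs_journal_commit *commit,
                                  int i)
{
    if (i < JOURNAL_TRANS_HALF)
        return le32_to_cpu(desc->j_realblock[i]) ;
    return le32_to_cpu(commit->j_realblock[i - JOURNAL_TRANS_HALF]) ;
}
#endif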
 1601 
 1602 /*
 1603 ** read and replay the log  (this comment describes journal_read(), below)
 1604 ** on a clean unmount, the journal header's next unflushed pointer will point to an invalid
 1605 ** transaction.  This tests for that before scanning all the transactions in the log, which makes normal mount times fast.
 1606 **
 1607 ** After a crash, this starts with the next unflushed transaction, and replays until it finds one too old, or invalid.
 1608 **
 1609 ** On exit, it sets things up so the first transaction will work correctly.
 1610 */
 1611 struct buffer_head * reiserfs_breada (kdev_t dev, int block, int bufsize,
 1612                             unsigned int max_block)
 1613 {
 1614         struct buffer_head * bhlist[BUFNR];
 1615         unsigned int blocks = BUFNR;
 1616         struct buffer_head * bh;
 1617         int i, j;
 1618         
 1619         bh = getblk (dev, block, bufsize);
 1620         if (buffer_uptodate (bh))
 1621                 return (bh);   
 1622                 
 1623         if (block + BUFNR > max_block) {
 1624                 blocks = max_block - block;
 1625         }
 1626         bhlist[0] = bh;
 1627         j = 1;
 1628         for (i = 1; i < blocks; i++) {
 1629                 bh = getblk (dev, block + i, bufsize);
 1630                 if (buffer_uptodate (bh)) {
 1631                         brelse (bh);
 1632                         break;
 1633                 }
 1634                 else bhlist[j++] = bh;
 1635         }
 1636         ll_rw_block (READ, j, bhlist);
 1637         for(i = 1; i < j; i++) 
 1638                 brelse (bhlist[i]);
 1639         bh = bhlist[0];
 1640         wait_on_buffer (bh);
 1641         if (buffer_uptodate (bh))
 1642                 return bh;
 1643         brelse (bh);
 1644         return NULL;
 1645 }
 1646 
 1647 static struct buffer_head * journal_breada (struct super_block *p_s_sb, int block)
 1648 {
 1649   return reiserfs_breada (SB_JOURNAL_DEV(p_s_sb), block, p_s_sb->s_blocksize,
 1650                           SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + SB_ONDISK_JOURNAL_SIZE(p_s_sb));
 1651 }
 1652 
 1653 static int journal_read(struct super_block *p_s_sb) {
 1654   struct reiserfs_journal_desc *desc ;
 1655   unsigned long oldest_trans_id = 0;
 1656   unsigned long oldest_invalid_trans_id = 0 ;
 1657   time_t start ;
 1658   unsigned long oldest_start = 0;
 1659   unsigned long cur_dblock = 0 ;
 1660   unsigned long newest_mount_id = 9 ;
 1661   struct buffer_head *d_bh ;
 1662   struct reiserfs_journal_header *jh ;
 1663   int valid_journal_header = 0 ;
 1664   int replay_count = 0 ;
 1665   int continue_replay = 1 ;
 1666   int ret ;
 1667 
 1668   cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) ;
 1669   printk("reiserfs: checking transaction log (device %s) ...\n",
 1670           bdevname(SB_JOURNAL_DEV(p_s_sb))) ;
 1671   printk("for (%s)\n",
 1672           bdevname(p_s_sb->s_dev)) ;
 1673 
 1674   start = CURRENT_TIME ;
 1675 
 1676   /* step 1, read in the journal header block.  Check the transaction it says 
 1677   ** is the first unflushed, and if that transaction is not valid, 
 1678   ** replay is done
 1679   */
 1680   SB_JOURNAL(p_s_sb)->j_header_bh = journal_bread(p_s_sb, 
 1681                                           SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + 
 1682                                           SB_ONDISK_JOURNAL_SIZE(p_s_sb)) ;
 1683   if (!SB_JOURNAL(p_s_sb)->j_header_bh) {
 1684     return 1 ;
 1685   }
 1686   jh = (struct reiserfs_journal_header *)(SB_JOURNAL(p_s_sb)->j_header_bh->b_data) ;
 1687   if (le32_to_cpu(jh->j_first_unflushed_offset) >= 0 && 
 1688       le32_to_cpu(jh->j_first_unflushed_offset) < SB_ONDISK_JOURNAL_SIZE(p_s_sb) &&
 1689       le32_to_cpu(jh->j_last_flush_trans_id) > 0) {
 1690     oldest_start = SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + 
 1691                        le32_to_cpu(jh->j_first_unflushed_offset) ;
 1692     oldest_trans_id = le32_to_cpu(jh->j_last_flush_trans_id) + 1;
 1693     newest_mount_id = le32_to_cpu(jh->j_mount_id);
 1694     reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1153: found in "
 1695                    "header: first_unflushed_offset %d, last_flushed_trans_id "
 1696                    "%lu\n", le32_to_cpu(jh->j_first_unflushed_offset), 
 1697                    le32_to_cpu(jh->j_last_flush_trans_id)) ;
 1698     valid_journal_header = 1 ;
 1699 
 1700     /* now, we try to read the first unflushed offset.  If it is not valid, 
 1701     ** there is nothing more we can do, and it makes no sense to read 
 1702     ** through the whole log.
 1703     */
 1704     d_bh = journal_bread(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + le32_to_cpu(jh->j_first_unflushed_offset)) ;
 1705     ret = journal_transaction_is_valid(p_s_sb, d_bh, NULL, NULL) ;
 1706     if (!ret) {
 1707       continue_replay = 0 ;
 1708     }
 1709     brelse(d_bh) ;
 1710     goto start_log_replay;
 1711   }
 1712 
 1713   if (continue_replay && is_read_only(p_s_sb->s_dev)) {
 1714     reiserfs_warning(p_s_sb, "clm-2076: device is readonly, unable to replay log\n") ;
 1715     return -1 ;
 1716   }
 1717   if (continue_replay && (p_s_sb->s_flags & MS_RDONLY)) {
 1718     printk("Warning, log replay starting on readonly filesystem\n") ;    
 1719   }
 1720 
 1721   /* ok, there are transactions that need to be replayed.  start with the first log block, find
 1722   ** all the valid transactions, and pick out the oldest.
 1723   */
 1724   while(continue_replay && cur_dblock < (SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + SB_ONDISK_JOURNAL_SIZE(p_s_sb))) {
 1725     d_bh = journal_breada(p_s_sb, cur_dblock) ;
 1726     ret = journal_transaction_is_valid(p_s_sb, d_bh, &oldest_invalid_trans_id, &newest_mount_id) ;
 1727     if (ret == 1) {
 1728       desc = (struct reiserfs_journal_desc *)d_bh->b_data ;
 1729       if (oldest_start == 0) { /* init all oldest_ values */
 1730         oldest_trans_id = le32_to_cpu(desc->j_trans_id) ;
 1731         oldest_start = d_bh->b_blocknr ;
 1732         newest_mount_id = le32_to_cpu(desc->j_mount_id) ;
 1733         reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1179: Setting "
 1734                        "oldest_start to offset %lu, trans_id %lu\n", 
 1735                        oldest_start - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb), 
 1736                        oldest_trans_id) ;
 1737       } else if (oldest_trans_id > le32_to_cpu(desc->j_trans_id)) { 
 1738         /* one we just read was older */
 1739         oldest_trans_id = le32_to_cpu(desc->j_trans_id) ;
 1740         oldest_start = d_bh->b_blocknr ;
 1741         reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1180: Resetting "
 1742                        "oldest_start to offset %lu, trans_id %lu\n", 
 1743                         oldest_start - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb), 
 1744                         oldest_trans_id) ;
 1745       }
 1746       if (newest_mount_id < le32_to_cpu(desc->j_mount_id)) {
 1747         newest_mount_id = le32_to_cpu(desc->j_mount_id) ;
 1748         reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1299: Setting "
 1749                       "newest_mount_id to %d\n", le32_to_cpu(desc->j_mount_id));
 1750       }
 1751       cur_dblock += le32_to_cpu(desc->j_len) + 2 ;
 1752     } else {
 1753       cur_dblock++ ;
 1754     }
 1755     brelse(d_bh) ;
 1756   }
 1757 
 1758 start_log_replay:
 1759   cur_dblock = oldest_start ;
 1760   if (oldest_trans_id)  {
 1761     reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1206: Starting replay "
 1762                    "from offset %lu, trans_id %lu\n", 
 1763                    cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb), 
 1764                    oldest_trans_id) ;
 1765 
 1766   }
 1767   replay_count = 0 ;
 1768   while(continue_replay && oldest_trans_id > 0) {
 1769     ret = journal_read_transaction(p_s_sb, cur_dblock, oldest_start, oldest_trans_id, newest_mount_id) ;
 1770     if (ret < 0) {
 1771       return ret ;
 1772     } else if (ret != 0) {
 1773       break ;
 1774     }
 1775     cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + SB_JOURNAL(p_s_sb)->j_start ;
 1776     replay_count++ ;
 1777     if (cur_dblock == oldest_start)
 1778         break;
 1779   }
 1780 
 1781   if (oldest_trans_id == 0) {
 1782     reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1225: No valid "
 1783                    "transactions found\n") ;
 1784   }
 1785   /* j_start does not get set correctly if we don't replay any transactions.
 1786   ** if we had a valid journal_header, set j_start to the first unflushed transaction value,
 1787   ** copy the trans_id from the header
 1788   */
 1789   if (valid_journal_header && replay_count == 0) { 
 1790     SB_JOURNAL(p_s_sb)->j_start = le32_to_cpu(jh->j_first_unflushed_offset) ;
 1791     SB_JOURNAL(p_s_sb)->j_trans_id = le32_to_cpu(jh->j_last_flush_trans_id) + 1;
 1792     SB_JOURNAL(p_s_sb)->j_last_flush_trans_id = le32_to_cpu(jh->j_last_flush_trans_id) ;
 1793     SB_JOURNAL(p_s_sb)->j_mount_id = le32_to_cpu(jh->j_mount_id) + 1;
 1794   } else {
 1795     SB_JOURNAL(p_s_sb)->j_mount_id = newest_mount_id + 1 ;
 1796   }
 1797   reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1299: Setting "
 1798                  "newest_mount_id to %lu\n", SB_JOURNAL(p_s_sb)->j_mount_id) ;
 1799   SB_JOURNAL(p_s_sb)->j_first_unflushed_offset = SB_JOURNAL(p_s_sb)->j_start ; 
 1800   if (replay_count > 0) {
 1801     printk("reiserfs: replayed %d transactions in %lu seconds\n", replay_count, 
 1802             CURRENT_TIME - start) ;
 1803   }
 1804   if (!is_read_only(p_s_sb->s_dev) && 
 1805        _update_journal_header_block(p_s_sb, SB_JOURNAL(p_s_sb)->j_start, 
 1806                                    SB_JOURNAL(p_s_sb)->j_last_flush_trans_id))
 1807   {
 1808       /* replay failed, caller must call free_journal_ram and abort
 1809       ** the mount
 1810       */
 1811       return -1 ;
 1812   }
 1813   return 0 ;
 1814 }
 1815 
 1816 
 1817 struct reiserfs_journal_commit_task {
 1818   struct super_block *p_s_sb ;
 1819   int jindex ;
 1820   int wake_on_finish ; /* if this is one, we wake the task_done queue, if it
 1821                        ** is zero, we free the whole struct on finish
 1822                        */
 1823   struct reiserfs_journal_commit_task *self ;
 1824   struct wait_queue *task_done ;
 1825   struct tq_struct task ;
 1826 } ;
 1827 
 1828 static void reiserfs_journal_commit_task_func(struct reiserfs_journal_commit_task *ct) {
 1829 
 1830   struct reiserfs_journal_list *jl ;
 1831   jl = SB_JOURNAL_LIST(ct->p_s_sb) + ct->jindex ;
 1832 
 1833   flush_commit_list(ct->p_s_sb, SB_JOURNAL_LIST(ct->p_s_sb) + ct->jindex, 1) ; 
 1834 
 1835   if (jl->j_len > 0 && atomic_read(&(jl->j_nonzerolen)) > 0 &&
 1836       atomic_read(&(jl->j_commit_left)) == 0) {
 1837     kupdate_one_transaction(ct->p_s_sb, jl) ;
 1838   }
 1839   reiserfs_kfree(ct->self, sizeof(struct reiserfs_journal_commit_task), ct->p_s_sb) ;
 1840 }
 1841 
 1842 static void setup_commit_task_arg(struct reiserfs_journal_commit_task *ct,
 1843                                   struct super_block *p_s_sb, 
 1844                                   int jindex) {
 1845   if (!ct) {
 1846     reiserfs_panic(NULL, "journal-1360: setup_commit_task_arg called with NULL struct\n") ;
 1847   }
 1848   ct->p_s_sb = p_s_sb ;
 1849   ct->jindex = jindex ;
 1850   ct->task_done = NULL ;
 1851   INIT_LIST_HEAD(&ct->task.list) ;
 1852   ct->task.sync = 0 ;
 1853   ct->task.routine = (void *)(void *)reiserfs_journal_commit_task_func ; 
 1854   ct->self = ct ;
 1855   ct->task.data = (void *)ct ;
 1856 }
 1857 
 1858 static void commit_flush_async(struct super_block *p_s_sb, int jindex) {
 1859   struct reiserfs_journal_commit_task *ct ;
 1860   /* use GFP_NOFS; GFP_KERNEL could try to flush inodes, which would try
 1861   ** to start/join a transaction and deadlock
 1862   */
 1863   ct = reiserfs_kmalloc(sizeof(struct reiserfs_journal_commit_task), GFP_NOFS, p_s_sb) ;
 1864   if (ct) {
 1865     setup_commit_task_arg(ct, p_s_sb, jindex) ;
 1866     queue_task(&(ct->task), &reiserfs_commit_thread_tq);
 1867     wake_up(&reiserfs_commit_thread_wait) ;
 1868   } else {
 1869 #ifdef CONFIG_REISERFS_CHECK
 1870     reiserfs_warning(p_s_sb, "journal-1540: kmalloc failed, doing sync commit\n") ;
 1871 #endif
 1872     flush_commit_list(p_s_sb, SB_JOURNAL_LIST(p_s_sb) + jindex, 1) ;
 1873   }
 1874 }
 1875 
 1876 /*
 1877 ** this is the commit thread.  It is started with kernel_thread on
 1878 ** FS mount, and journal_release() waits for it to exit.
 1879 **
 1880 ** It could do a periodic commit, but there is a lot of code for that
 1881 ** elsewhere right now, and I only wanted to implement this little
 1882 ** piece for starters.
 1883 **
 1884 ** All we do here is sleep on the j_commit_thread_wait wait queue, and
 1885 ** then run the per filesystem commit task queue when we wakeup.
 1886 */
 1887 static int reiserfs_journal_commit_thread(void *nullp) {
 1888 
 1889   daemonize() ;
 1890 
 1891   spin_lock_irq(&current->sigmask_lock);
 1892   sigfillset(&current->blocked);
 1893   recalc_sigpending(current);
 1894   spin_unlock_irq(&current->sigmask_lock);
 1895 
 1896   sprintf(current->comm, "kreiserfsd") ;
 1897   lock_kernel() ;
 1898   while(1) {
 1899 
 1900     while(TQ_ACTIVE(reiserfs_commit_thread_tq)) {
 1901       run_task_queue(&reiserfs_commit_thread_tq) ;
 1902     }
 1903 
 1904     /* if there aren't any more filesystems left, break */
 1905     if (reiserfs_mounted_fs_count <= 0) {
 1906       run_task_queue(&reiserfs_commit_thread_tq) ;
 1907       break ;
 1908     }
 1909     wake_up(&reiserfs_commit_thread_done) ;
 1910     interruptible_sleep_on_timeout(&reiserfs_commit_thread_wait, 5 * HZ) ;
 1911   }
 1912   unlock_kernel() ;
 1913   wake_up(&reiserfs_commit_thread_done) ;
 1914   return 0 ;
 1915 }
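/* Illustrative sketch: the shutdown handshake the loop above implements,
** as seen from do_journal_release() earlier in this file.  The releaser
** drops the count *before* waking the thread, so the thread reliably
** sees count <= 0, drains the queue one last time, and signals ..._done.
*/
#if 0   /* example only -- this is the unmount side, shown for contrast */
    reiserfs_mounted_fs_count-- ;
    wake_up(&reiserfs_commit_thread_wait) ;   /* kick the loop above */
    sleep_on(&reiserfs_commit_thread_done) ;  /* wait for its exit pass */
#endif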
 1916 
 1917 static void journal_list_init(struct super_block *p_s_sb) {
 1918   int i ;
 1919   for (i = 0 ; i < JOURNAL_LIST_COUNT ; i++) {
 1920     init_waitqueue_head(&(SB_JOURNAL_LIST(p_s_sb)[i].j_commit_wait)) ;
 1921     init_waitqueue_head(&(SB_JOURNAL_LIST(p_s_sb)[i].j_flush_wait)) ;
 1922   }
 1923 }
 1924 
 1925 static int release_journal_dev( struct super_block *super,
 1926                                 struct reiserfs_journal *journal )
 1927 {
 1928     int result;
 1929     
 1930     result = 0;
 1931         
 1932     if( journal -> j_dev_bd != NULL && journal->j_dev_bd != super->s_bdev) {
 1933         result = blkdev_put( journal -> j_dev_bd, BDEV_FS );
 1934         journal -> j_dev_bd = NULL;
 1935     }
 1936     if( journal -> j_dev_file != NULL ) {
 1937         result = filp_close( journal -> j_dev_file, NULL );
 1938         journal -> j_dev_file = NULL;
 1939     }
 1940     if( result != 0 ) {
 1941         reiserfs_warning(super, "release_journal_dev: Cannot release journal device: %i\n", result );
 1942     }
 1943     return result;
 1944 }
 1945 
 1946 static int journal_init_dev( struct super_block *super, 
 1947                              struct reiserfs_journal *journal, 
 1948                              const char *jdev_name )
 1949 {
 1950         int result;
 1951         kdev_t jdev;
 1952         int blkdev_mode = FMODE_READ | FMODE_WRITE;
 1953 
 1954         result = 0;
 1955 
 1956         journal -> j_dev_bd = NULL;
 1957         journal -> j_dev_file = NULL;
 1958         jdev = SB_JOURNAL_DEV( super ) = 
 1959                 SB_ONDISK_JOURNAL_DEVICE( super ) ?
 1960                 to_kdev_t(SB_ONDISK_JOURNAL_DEVICE( super )) : super -> s_dev;  
 1961 
 1962         /* there is no "jdev" option */
 1963 
 1964         if (is_read_only(super->s_dev))
 1965             blkdev_mode = FMODE_READ;
 1966 
 1967         if( ( !jdev_name || !jdev_name[ 0 ] ) ) {
 1968 
 1969                 /* don't add an extra reference to the device when 
 1970                  * the log is on the same disk as the FS.  It makes the
 1971                  * raid code unhappy
 1972                  */
 1973                 if (jdev == super->s_dev) {
 1974                     journal->j_dev_bd = super->s_bdev;
 1975                     return 0;
 1976                 }
 1977                 journal -> j_dev_bd = bdget( kdev_t_to_nr( jdev ) );
 1978                 if( journal -> j_dev_bd ) {
 1979                         result = blkdev_get( journal -> j_dev_bd, 
 1980                                              blkdev_mode, 0, BDEV_FS );
 1981                         if (result) {
 1982                             bdput(journal->j_dev_bd);
 1983                             journal->j_dev_bd = NULL;
 1984                         }
 1985                 } else {
 1986                         result = -ENOMEM;
 1987                 } 
 1988                 if( result != 0 )
 1989                         printk( "journal_init_dev: cannot init journal device\n '%s': %i", 
 1990                                 kdevname( jdev ), result );
 1991 
 1992                 return result;
 1993         }
 1994 
 1995         /* "jdev" option has been found */
 1996 
 1997         journal -> j_dev_file = filp_open( jdev_name, 0, 0 );
 1998         if( !IS_ERR( journal -> j_dev_file ) ) {
 1999                 struct inode *jdev_inode;
 2000 
 2001                 jdev_inode = journal -> j_dev_file -> f_dentry -> d_inode;
 2002                 journal -> j_dev_bd = jdev_inode -> i_bdev;
 2003                 if( !S_ISBLK( jdev_inode -> i_mode ) ) {
 2004                         printk( "journal_init_dev: '%s' is not a block device", jdev_name );
 2005                         result = -ENOTBLK;
 2006                 } else if( journal -> j_dev_file -> f_vfsmnt -> mnt_flags & MNT_NODEV) {
 2007                         printk( "journal_init_dev: Cannot use devices on '%s'", jdev_name );
 2008                         result = -EACCES;
 2009                 } else if( jdev_inode -> i_bdev == NULL ) {
 2010                         printk( "journal_init_dev: bdev unintialized for '%s'", jdev_name );
 2011                         result = -ENOMEM;
 2012                 } else if( ( result = blkdev_get( jdev_inode -> i_bdev, 
 2013                                                   blkdev_mode,
 2014                                                   0, BDEV_FS ) ) != 0 ) {
 2015                         journal -> j_dev_bd = NULL;
 2016                         printk( "journal_init_dev: Cannot load device '%s': %i", jdev_name,
 2017                              result );
 2018                 } else
 2019                         /* ok */
 2020                         SB_JOURNAL_DEV( super ) = 
 2021                                 to_kdev_t( jdev_inode -> i_bdev -> bd_dev );
 2022         } else {
 2023                 result = PTR_ERR( journal -> j_dev_file );
 2024                 journal -> j_dev_file = NULL;
 2025                 printk( "journal_init_dev: Cannot open '%s': %i", jdev_name, result );
 2026         }
 2027         if( result != 0 ) {
 2028                 release_journal_dev( super, journal );
 2029         }
 2030         printk( "journal_init_dev: journal device: %s", kdevname( SB_JOURNAL_DEV( super ) ) );
 2031         return result;
 2032 }
 2033 
 2034 /*
 2035 ** must be called once on fs mount.  calls journal_read for you
 2036 */
 2037 int journal_init(struct super_block *p_s_sb, const char * j_dev_name, 
 2038                   int old_format) {
 2039     int num_cnodes = SB_ONDISK_JOURNAL_SIZE(p_s_sb) * 2 ;
 2040     struct buffer_head *bhjh;
 2041     struct reiserfs_super_block * rs;
 2042     struct reiserfs_journal_header *jh;
 2043     struct reiserfs_journal *journal;
 2044 
 2045     if (sizeof(struct reiserfs_journal_commit) != 4096 ||
 2046         sizeof(struct reiserfs_journal_desc) != 4096) {
 2047         reiserfs_warning(p_s_sb, "journal-1249: commit or desc struct not 4096 %Zd %Zd\n", 
 2048                sizeof(struct reiserfs_journal_commit), 
 2049         sizeof(struct reiserfs_journal_desc)) ;
 2050         return 1 ;
 2051     }
 2052 
 2053     if ( SB_ONDISK_JOURNAL_SIZE(p_s_sb) < 512 ) {
 2054         reiserfs_warning(p_s_sb, "Journal size %d is less than 512+1 blocks, which unsupported\n", SB_ONDISK_JOURNAL_SIZE(p_s_sb));
 2055         return 1 ;
 2056     }
 2057 
 2058     journal = SB_JOURNAL(p_s_sb) = vmalloc(sizeof (struct reiserfs_journal)) ;
 2059     if (!journal) {
 2060         reiserfs_warning(p_s_sb, "journal-1256: unable to get memory for journal structure\n") ;
 2061         return 1 ;
 2062     }
 2063     memset(journal, 0, sizeof(struct reiserfs_journal)) ;
 2064     INIT_LIST_HEAD(&SB_JOURNAL(p_s_sb)->j_bitmap_nodes) ;
 2065     INIT_LIST_HEAD (&SB_JOURNAL(p_s_sb)->j_prealloc_list);
 2066 
 2067     reiserfs_allocate_list_bitmaps(p_s_sb, SB_JOURNAL(p_s_sb)->j_list_bitmap, 
 2068                                    SB_BMAP_NR(p_s_sb)) ;
 2069     allocate_bitmap_nodes(p_s_sb) ;
 2070 
 2071     /* reserved for journal area support */
 2072     SB_JOURNAL_1st_RESERVED_BLOCK(p_s_sb) = (old_format ?
 2073                                             REISERFS_OLD_DISK_OFFSET_IN_BYTES /
 2074                                             p_s_sb->s_blocksize +
 2075                                             SB_BMAP_NR(p_s_sb) + 1 :
 2076                                             REISERFS_DISK_OFFSET_IN_BYTES / 
 2077                                             p_s_sb->s_blocksize + 2); 
 2078     
 2079     if( journal_init_dev( p_s_sb, journal, j_dev_name ) != 0 ) {
 2080         reiserfs_warning(p_s_sb, "journal-1259: unable to initialize jornal device\n");
 2081         goto free_and_return;
 2082     }
 2083 
 2084     rs = SB_DISK_SUPER_BLOCK(p_s_sb);
 2085      
 2086     /* read journal header */
 2087     bhjh = journal_bread (p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + 
 2088                           SB_ONDISK_JOURNAL_SIZE(p_s_sb));
 2089     if (!bhjh) {
 2090         reiserfs_warning(p_s_sb, "journal-459: unable to read  journal header\n") ;
 2091         goto free_and_return;
 2092     }
 2093     jh = (struct reiserfs_journal_header *)(bhjh->b_data);
 2094      
 2095     /* make sure that journal matches to the super block */
 2096     if (is_reiserfs_jr(rs) && 
 2097         jh->jh_journal.jp_journal_magic != sb_jp_journal_magic(rs)) {
 2098         char jname[ 32 ];
 2099         char fname[ 32 ];
 2100          
 2101         strcpy( jname, kdevname( SB_JOURNAL_DEV(p_s_sb) ) );
 2102         strcpy( fname, kdevname( p_s_sb->s_dev ) );
 2103         printk("journal-460: journal header magic %x (device %s) does not "
 2104                "match magic found in super block %x (device %s)\n",
 2105                 jh->jh_journal.jp_journal_magic, jname,
 2106                 sb_jp_journal_magic(rs), fname);
 2107         brelse (bhjh);
 2108         goto free_and_return;
 2109     }
 2110      
 2111     SB_JOURNAL_TRANS_MAX(p_s_sb) = le32_to_cpu (jh->jh_journal.jp_journal_trans_max);
 2112     SB_JOURNAL_MAX_BATCH(p_s_sb) = le32_to_cpu (jh->jh_journal.jp_journal_max_batch);
 2113     SB_JOURNAL_MAX_COMMIT_AGE(p_s_sb) = le32_to_cpu (jh->jh_journal.jp_journal_max_commit_age);
 2114     SB_JOURNAL_MAX_TRANS_AGE(p_s_sb) = JOURNAL_MAX_TRANS_AGE;
 2115 
 2116     if (SB_JOURNAL_TRANS_MAX(p_s_sb)) {
 2117         /* the values came from disk; sanity check them and clamp into range */
 2118         __u32 initial = SB_JOURNAL_TRANS_MAX(p_s_sb);
 2119         __u32 ratio = 1;
 2120     
 2121         if (p_s_sb->s_blocksize < 4096)
 2122             ratio = 4096 / p_s_sb->s_blocksize;
 2123 
 2124         if (SB_ONDISK_JOURNAL_SIZE(p_s_sb)/SB_JOURNAL_TRANS_MAX(p_s_sb) < 
 2125             JOURNAL_MIN_RATIO) 
 2126         {
 2127             SB_JOURNAL_TRANS_MAX(p_s_sb) = SB_ONDISK_JOURNAL_SIZE(p_s_sb) / 
 2128                                            JOURNAL_MIN_RATIO;
 2129         }
 2130         if (SB_JOURNAL_TRANS_MAX(p_s_sb) > JOURNAL_TRANS_MAX_DEFAULT / ratio)
 2131             SB_JOURNAL_TRANS_MAX(p_s_sb) = JOURNAL_TRANS_MAX_DEFAULT / ratio;
 2132         if (SB_JOURNAL_TRANS_MAX(p_s_sb) < JOURNAL_TRANS_MIN_DEFAULT / ratio)
 2133             SB_JOURNAL_TRANS_MAX(p_s_sb) = JOURNAL_TRANS_MIN_DEFAULT / ratio;
 2134 
 2135         if (SB_JOURNAL_TRANS_MAX(p_s_sb) != initial) {
 2136             printk ("reiserfs warning: wrong transaction max size (%u). "
 2137                     "Changed to %u\n", initial, SB_JOURNAL_TRANS_MAX(p_s_sb));
 2138         }
 2139         SB_JOURNAL_MAX_BATCH(p_s_sb) = SB_JOURNAL_TRANS_MAX(p_s_sb) *
 2140                                        JOURNAL_MAX_BATCH_DEFAULT / 
 2141                                        JOURNAL_TRANS_MAX_DEFAULT;
 2142     }
 2143   
 2144     if (!SB_JOURNAL_TRANS_MAX(p_s_sb)) {
 2145         /* the file system was created by an old version of mkreiserfs, 
 2146            so this field contains a zero value */
 2147         SB_JOURNAL_TRANS_MAX(p_s_sb)      = JOURNAL_TRANS_MAX_DEFAULT ;
 2148         SB_JOURNAL_MAX_BATCH(p_s_sb)      = JOURNAL_MAX_BATCH_DEFAULT ;  
 2149         SB_JOURNAL_MAX_COMMIT_AGE(p_s_sb) = JOURNAL_MAX_COMMIT_AGE ;
 2150         
 2151         /* for blocksize >= 4096 the max transaction size is 1024.  For 
 2152            block sizes < 4096 the trans max size is decreased proportionally */
 2153         if (p_s_sb->s_blocksize < 4096) {
 2154             SB_JOURNAL_TRANS_MAX(p_s_sb) /= (4096 / p_s_sb->s_blocksize) ;
 2155             SB_JOURNAL_MAX_BATCH(p_s_sb) = SB_JOURNAL_TRANS_MAX(p_s_sb)*9 / 10;
 2156         }
 2157     }
 2158 
 2159     brelse (bhjh);
 2160 
 2161     SB_JOURNAL(p_s_sb)->j_list_bitmap_index = 0 ;
 2162     SB_JOURNAL_LIST_INDEX(p_s_sb) = -10000 ; /* make sure flush_old_commits does not try to flush a list while replay is on */
 2163 
 2164     /* clear out the journal list array */
 2165     memset(SB_JOURNAL_LIST(p_s_sb), 0, 
 2166            sizeof(struct reiserfs_journal_list) * JOURNAL_LIST_COUNT) ; 
 2167 
 2168     journal_list_init(p_s_sb) ;
 2169 
 2170     memset(SB_JOURNAL(p_s_sb)->j_list_hash_table, 0, 
 2171            JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *)) ;
 2172     memset(journal_writers, 0, sizeof(char *) * 512) ; /* debug code */
 2173 
 2174     INIT_LIST_HEAD(&(SB_JOURNAL(p_s_sb)->j_dirty_buffers)) ;
 2175 
 2176     SB_JOURNAL(p_s_sb)->j_start = 0 ;
 2177     SB_JOURNAL(p_s_sb)->j_len = 0 ;
 2178     SB_JOURNAL(p_s_sb)->j_len_alloc = 0 ;
 2179     atomic_set(&(SB_JOURNAL(p_s_sb)->j_wcount), 0) ;
 2180     SB_JOURNAL(p_s_sb)->j_bcount = 0 ;    
 2181     SB_JOURNAL(p_s_sb)->j_trans_start_time = 0 ;          
 2182     SB_JOURNAL(p_s_sb)->j_last = NULL ;   
 2183     SB_JOURNAL(p_s_sb)->j_first = NULL ;     
 2184     init_waitqueue_head(&(SB_JOURNAL(p_s_sb)->j_join_wait)) ;
 2185     init_waitqueue_head(&(SB_JOURNAL(p_s_sb)->j_wait)) ; 
 2186 
 2187     SB_JOURNAL(p_s_sb)->j_trans_id = 10 ;  
 2188     SB_JOURNAL(p_s_sb)->j_mount_id = 10 ; 
 2189     SB_JOURNAL(p_s_sb)->j_state = 0 ;
 2190     atomic_set(&(SB_JOURNAL(p_s_sb)->j_jlock), 0) ;
 2191     atomic_set(&(SB_JOURNAL(p_s_sb)->j_wlock), 0) ;
 2192     SB_JOURNAL(p_s_sb)->j_cnode_free_list = allocate_cnodes(num_cnodes) ;
 2193     SB_JOURNAL(p_s_sb)->j_cnode_free_orig = SB_JOURNAL(p_s_sb)->j_cnode_free_list ;
 2194     SB_JOURNAL(p_s_sb)->j_cnode_free = SB_JOURNAL(p_s_sb)->j_cnode_free_list ? 
 2195                                        num_cnodes : 0 ;
 2196     SB_JOURNAL(p_s_sb)->j_cnode_used = 0 ;
 2197     SB_JOURNAL(p_s_sb)->j_must_wait = 0 ;
 2198     init_journal_hash(p_s_sb) ;
 2199     SB_JOURNAL_LIST(p_s_sb)[0].j_list_bitmap = get_list_bitmap(p_s_sb, SB_JOURNAL_LIST(p_s_sb)) ;
 2200     if (!(SB_JOURNAL_LIST(p_s_sb)[0].j_list_bitmap)) {
 2201         reiserfs_warning(p_s_sb, "journal-2005, get_list_bitmap failed for journal list 0\n") ;
 2202         goto free_and_return;
 2203     }
 2204     if (journal_read(p_s_sb) < 0) {
 2205         reiserfs_warning(p_s_sb, "Replay Failure, unable to mount\n") ;
 2206         goto free_and_return;
 2207     }
 2208     /* once the read is done, we can set this where it belongs */
 2209     SB_JOURNAL_LIST_INDEX(p_s_sb) = 0 ; 
 2210 
 2211     if (reiserfs_dont_log (p_s_sb))
 2212         return 0;
 2213 
 2214     reiserfs_mounted_fs_count++ ;
 2215     if (reiserfs_mounted_fs_count <= 1) {
 2216         kernel_thread((void *)(void *)reiserfs_journal_commit_thread, NULL,
 2217                       CLONE_FS | CLONE_FILES | CLONE_VM) ;
 2218     }
 2219     return 0 ;
 2220 
 2221 free_and_return:
 2222     free_journal_ram(p_s_sb);
 2223     return 1;
 2224 }
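/* Illustrative sketch (assumed caller in super.c): journal_init() is
** called once during read_super; if the mount fails after this point,
** the error path must use journal_release_error() so the journal state
** set up here (and any replay) is torn down.  The condition name below
** is hypothetical.
*/
#if 0   /* example only */
    if (journal_init(s, jdev_name, old_format)) {
        /* journal_init already freed its RAM via free_and_return */
        return NULL ;
    }
    if (some_later_mount_step_fails) {
        journal_release_error(NULL, s) ;   /* error == 1 path, no flush */
        return NULL ;
    }
#endif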
 2225 
 2226 /*
 2227 ** test for a polite end of the current transaction.  Used by file_write, and should
 2228 ** be used by delete to make sure they don't write more than can fit inside a single
 2229 ** transaction
 2230 */
 2231 int journal_transaction_should_end(struct reiserfs_transaction_handle *th, int new_alloc) {
 2232   time_t now = CURRENT_TIME ;
 2233   if (reiserfs_dont_log(th->t_super)) 
 2234     return 0 ;
 2235   if ( SB_JOURNAL(th->t_super)->j_must_wait > 0 ||
 2236        (SB_JOURNAL(th->t_super)->j_len_alloc + new_alloc) >= SB_JOURNAL_MAX_BATCH(th->t_super) || 
 2237        atomic_read(&(SB_JOURNAL(th->t_super)->j_jlock)) ||
 2238       (now - SB_JOURNAL(th->t_super)->j_trans_start_time) > SB_JOURNAL_MAX_TRANS_AGE(th->t_super) ||
 2239        SB_JOURNAL(th->t_super)->j_cnode_free < (SB_JOURNAL_TRANS_MAX(th->t_super) * 3)) { 
 2240     return 1 ;
 2241   }
 2242   return 0 ;
 2243 }
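/* Illustrative sketch (modeled on the file_write use the comment above
** mentions; the surrounding loop is assumed): a long-running writer
** polls journal_transaction_should_end() and politely restarts its
** transaction when the journal wants to close the current one.
*/
#if 0   /* example only */
    if (journal_transaction_should_end(&th, blocks_per_step)) {
        journal_end(&th, p_s_sb, blocks_reserved) ;
        journal_begin(&th, p_s_sb, blocks_reserved) ;
    }
    /* ... mark a few more buffers dirty, then repeat ... */
#endif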
 2244 
 2245 /* this must be called inside a transaction, and requires the 
 2246 ** kernel_lock to be held
 2247 */
 2248 void reiserfs_block_writes(struct reiserfs_transaction_handle *th) {
 2249     struct super_block *s = th->t_super ;
 2250     SB_JOURNAL(s)->j_must_wait = 1 ;
 2251     set_bit(WRITERS_BLOCKED, &SB_JOURNAL(s)->j_state) ;
 2252     return ;
 2253 }
 2254 
 2255 /* this must be called without a transaction started, and does not
 2256 ** require BKL
 2257 */
 2258 void reiserfs_allow_writes(struct super_block *s) {
 2259     clear_bit(WRITERS_BLOCKED, &SB_JOURNAL(s)->j_state) ;
 2260     wake_up(&SB_JOURNAL(s)->j_join_wait) ;
 2261 }
 2262 
 2263 /* this must be called without a transaction started, and does not
 2264 ** require BKL
 2265 */
 2266 void reiserfs_wait_on_write_block(struct super_block *s) {
 2267     wait_event(SB_JOURNAL(s)->j_join_wait, 
 2268                !test_bit(WRITERS_BLOCKED, &SB_JOURNAL(s)->j_state)) ;
 2269 }
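/* Illustrative sketch: how the three helpers above pair up.  A caller
** that needs the FS quiesced blocks new writers from inside a
** transaction, pushes its own transaction out, does its work, then
** reopens the gate; racing writers park in
** reiserfs_wait_on_write_block() via do_journal_begin_r below.  The
** quiesced work itself is an assumption of the example.
*/
#if 0   /* example only */
    reiserfs_block_writes(&th) ;      /* inside a transaction, BKL held */
    journal_end_sync(&th, s, 1) ;     /* commit the current transaction */
    /* ... FS is quiet: snapshot, resize, etc ... */
    reiserfs_allow_writes(s) ;        /* wakes everyone on j_join_wait */
#endif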
 2270 
 2271 /* join == true if you must join an existing transaction.
 2272 ** join == false if you can deal with waiting for others to finish
 2273 **
 2274 ** this will block until the transaction is joinable.  Pass the number of blocks you
 2275 ** expect to use in nblocks.
 2276 */
 2277 static int do_journal_begin_r(struct reiserfs_transaction_handle *th, struct super_block * p_s_sb,unsigned long nblocks,int join) {
 2278   time_t now = CURRENT_TIME ;
 2279   int old_trans_id  ;
 2280 
 2281   reiserfs_check_lock_depth("journal_begin") ;
 2282   RFALSE( p_s_sb->s_flags & MS_RDONLY, 
 2283           "clm-2078: calling journal_begin on readonly FS") ;
 2284 
 2285   if (reiserfs_dont_log(p_s_sb)) {
 2286     th->t_super = p_s_sb ; /* others will check this for the don't log flag */
 2287     return 0 ;
 2288   }
 2289   PROC_INFO_INC( p_s_sb, journal.journal_being );
 2290 
 2291 relock:
 2292   lock_journal(p_s_sb) ;
 2293 
 2294   if (test_bit(WRITERS_BLOCKED, &SB_JOURNAL(p_s_sb)->j_state)) {
 2295     unlock_journal(p_s_sb) ;
 2296     reiserfs_wait_on_write_block(p_s_sb) ;
 2297     PROC_INFO_INC( p_s_sb, journal.journal_relock_writers );
 2298     goto relock ;
 2299   }
 2300 
 2301   /* if there is no room in the journal OR
 2302   ** if this transaction is too old and we weren't called joinable, wait for it
 2303   ** to finish before beginning.  We don't sleep if there aren't other writers.
 2304   */
 2305 
 2306   if (  (!join && SB_JOURNAL(p_s_sb)->j_must_wait > 0) ||
 2307      ( !join && (SB_JOURNAL(p_s_sb)->j_len_alloc + nblocks + 2) >= SB_JOURNAL_MAX_BATCH(p_s_sb)) || 
 2308      (!join && atomic_read(&(SB_JOURNAL(p_s_sb)->j_wcount)) > 0 && SB_JOURNAL(p_s_sb)->j_trans_start_time > 0 && 
 2309       (now - SB_JOURNAL(p_s_sb)->j_trans_start_time) > SB_JOURNAL_MAX_TRANS_AGE(p_s_sb)) ||
 2310      (!join && atomic_read(&(SB_JOURNAL(p_s_sb)->j_jlock)) ) ||
 2311      (!join && SB_JOURNAL(p_s_sb)->j_cnode_free < (SB_JOURNAL_TRANS_MAX(p_s_sb) * 3))) {
 2312 
 2313     unlock_journal(p_s_sb) ; /* allow others to finish this transaction */
 2314 
 2315     /* if writer count is 0, we can just force this transaction to end, and start
 2316     ** a new one afterwards.
 2317     */
 2318     if (atomic_read(&(SB_JOURNAL(p_s_sb)->j_wcount)) <= 0) {
 2319       struct reiserfs_transaction_handle myth ;
 2320       journal_join(&myth, p_s_sb, 1) ;
 2321       reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb), 1) ;
 2322       journal_mark_dirty(&myth, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb)) ;
 2323       do_journal_end(&myth, p_s_sb,1,COMMIT_NOW) ;
 2324     } else {
 2325       /* but if the writer count isn't zero, we have to wait for the current writers to finish.
 2326       ** They won't batch on transaction end once we set j_jlock
 2327       */
 2328       atomic_set(&(SB_JOURNAL(p_s_sb)->j_jlock), 1) ;
 2329       old_trans_id = SB_JOURNAL(p_s_sb)->j_trans_id ;
 2330       while(atomic_read(&(SB_JOURNAL(p_s_sb)->j_jlock)) &&
 2331             SB_JOURNAL(p_s_sb)->j_trans_id == old_trans_id) {
 2332         sleep_on(&(SB_JOURNAL(p_s_sb)->j_join_wait)) ;
 2333       }
 2334     }
 2335     PROC_INFO_INC( p_s_sb, journal.journal_relock_wcount );
 2336     goto relock ;
 2337   }
 2338 
 2339   if (SB_JOURNAL(p_s_sb)->j_trans_start_time == 0) { /* we are the first writer, set trans_id */
 2340     SB_JOURNAL(p_s_sb)->j_trans_start_time = now ;
 2341   }
 2342   atomic_inc(&(SB_JOURNAL(p_s_sb)->j_wcount)) ;
 2343   SB_JOURNAL(p_s_sb)->j_len_alloc += nblocks ;
 2344   th->t_blocks_logged = 0 ;
 2345   th->t_blocks_allocated = nblocks ;
 2346   th->t_super = p_s_sb ;
 2347   th->t_trans_id = SB_JOURNAL(p_s_sb)->j_trans_id ;
 2348   th->t_caller = "Unknown" ;
 2349   unlock_journal(p_s_sb) ;
 2350   p_s_sb->s_dirt = 1; 
 2351   return 0 ;
 2352 }
 2353 
 2354 
 2355 static int journal_join(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, unsigned long nblocks) {
 2356   return do_journal_begin_r(th, p_s_sb, nblocks, 1) ;
 2357 }
 2358 
 2359 int journal_begin(struct reiserfs_transaction_handle *th, struct super_block  * p_s_sb, unsigned long nblocks) {
 2360   return do_journal_begin_r(th, p_s_sb, nblocks, 0) ;
 2361 }
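/* Illustrative sketch: the canonical logging sequence, the same
** begin/prepare/mark_dirty/end pattern do_journal_release() and
** flush_old_commits() use elsewhere in this file; `bh' is an assumed
** buffer the caller already holds.
*/
#if 0   /* example only */
    struct reiserfs_transaction_handle th ;

    journal_begin(&th, p_s_sb, 2) ;               /* expect to log ~2 blocks */
    reiserfs_prepare_for_journal(p_s_sb, bh, 1) ;
    /* ... modify bh->b_data ... */
    journal_mark_dirty(&th, p_s_sb, bh) ;
    journal_end(&th, p_s_sb, 2) ;
#endif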
 2362 
 2363 /* not used at all */
 2364 int journal_prepare(struct super_block  * p_s_sb, struct buffer_head *bh) {
 2365   return 0 ;
 2366 }
 2367 
 2368 /*
 2369 ** puts bh into the current transaction.  If it was already there, it is reordered:
 2370 ** the old pointers are removed from the hash and new ones put in (to make sure replay happens in the right order).
 2371 **
 2372 ** if it was dirty, cleans it and files it onto the clean list.  I can't let it be dirty again until the
 2373 ** transaction is committed.
 2374 ** 
 2375 ** if j_len is bigger than j_len_alloc, it pushes j_len_alloc to 10 + j_len.
 2376 */
 2377 int journal_mark_dirty(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, struct buffer_head *bh) {
 2378   struct reiserfs_journal_cnode *cn = NULL;
 2379   int count_already_incd = 0 ;
 2380   int prepared = 0 ;
 2381 
 2382   PROC_INFO_INC( p_s_sb, journal.mark_dirty );
 2383   if (reiserfs_dont_log(th->t_super)) {
 2384     mark_buffer_dirty(bh) ;
 2385     return 0 ;
 2386   }
 2387 
 2388   if (th->t_trans_id != SB_JOURNAL(p_s_sb)->j_trans_id) {
 2389     reiserfs_panic(th->t_super, "journal-1577: handle trans id %ld != current trans id %ld\n", 
 2390                    th->t_trans_id, SB_JOURNAL(p_s_sb)->j_trans_id);
 2391   }
 2392   p_s_sb->s_dirt = 1 ;
 2393 
 2394   prepared = test_and_clear_bit(BH_JPrepared, &bh->b_state) ;
 2395   /* already in this transaction, we are done */
 2396   if (buffer_journaled(bh)) {
 2397     PROC_INFO_INC( p_s_sb, journal.mark_dirty_already );
 2398     return 0 ;
 2399   }
 2400 
 2401   /* this must be turned into a panic instead of a warning.  We can't allow
 2402   ** a dirty or journal_dirty or locked buffer to be logged, as some changes
 2403   ** could get to disk too early.  NOT GOOD.
 2404   */
 2405   if (!prepared || buffer_locked(bh)) {
 2406     reiserfs_warning(p_s_sb, "journal-1777: buffer %lu bad state %cPREPARED %cLOCKED %cDIRTY %cJDIRTY_WAIT\n", bh->b_blocknr, prepared ? ' ' : '!', 
 2407                             buffer_locked(bh) ? ' ' : '!',
 2408                             buffer_dirty(bh) ? ' ' : '!',
 2409                             buffer_journal_dirty(bh) ? ' ' : '!') ;
 2410     show_reiserfs_locks() ;
 2411   }
 2412   count_already_incd = clear_prepared_bits(bh) ;
 2413 
 2414   if (atomic_read(&(SB_JOURNAL(p_s_sb)->j_wcount)) <= 0) {
 2415     reiserfs_warning(p_s_sb, "journal-1409: journal_mark_dirty returning because j_wcount was %d\n", atomic_read(&(SB_JOURNAL(p_s_sb)->j_wcount))) ;
 2416     return 1 ;
 2417   }
 2418   /* this error means I've screwed up, and we've overflowed the transaction.  
 2419   ** Nothing can be done here, except make the FS readonly or panic.
 2420   */ 
 2421   if (SB_JOURNAL(p_s_sb)->j_len >= SB_JOURNAL_TRANS_MAX(p_s_sb)) { 
 2422     reiserfs_panic(th->t_super, "journal-1413: journal_mark_dirty: j_len (%lu) is too big\n", SB_JOURNAL(p_s_sb)->j_len) ;
 2423   }
 2424 
 2425   if (buffer_journal_dirty(bh)) {
 2426     count_already_incd = 1 ;
 2427     PROC_INFO_INC( p_s_sb, journal.mark_dirty_notjournal );
 2428     mark_buffer_notjournal_dirty(bh) ;
 2429   }
 2430 
 2431   if (buffer_dirty(bh)) {
 2432     clear_bit(BH_Dirty, &bh->b_state) ;
 2433   }
 2434 
 2435   if (buffer_journaled(bh)) { /* must double check after getting lock */
 2436     goto done ;
 2437   }
 2438 
 2439   if (SB_JOURNAL(p_s_sb)->j_len > SB_JOURNAL(p_s_sb)->j_len_alloc) {
 2440     SB_JOURNAL(p_s_sb)->j_len_alloc = SB_JOURNAL(p_s_sb)->j_len + JOURNAL_PER_BALANCE_CNT ;
 2441   }
 2442 
 2443   set_bit(BH_JDirty, &bh->b_state) ;
 2444 
 2445   /* now put this guy on the end */
 2446   if (!cn) {
 2447     cn = get_cnode(p_s_sb) ;
 2448     if (!cn) {
 2449       reiserfs_panic(p_s_sb, "get_cnode failed!\n"); 
 2450     }
 2451 
 2452     if (th->t_blocks_logged == th->t_blocks_allocated) {
 2453       th->t_blocks_allocated += JOURNAL_PER_BALANCE_CNT ;
 2454       SB_JOURNAL(p_s_sb)->j_len_alloc += JOURNAL_PER_BALANCE_CNT ;
 2455     }
 2456     th->t_blocks_logged++ ;
 2457     SB_JOURNAL(p_s_sb)->j_len++ ;
 2458 
 2459     cn->bh = bh ;
 2460     cn->blocknr = bh->b_blocknr ;
 2461     cn->dev = bh->b_dev ;
 2462     cn->jlist = NULL ;
 2463     insert_journal_hash(SB_JOURNAL(p_s_sb)->j_hash_table, cn) ;
 2464     if (!count_already_incd) {
 2465       get_bh(bh) ;
 2466     }
 2467   }
 2468   cn->next = NULL ;
 2469   cn->prev = SB_JOURNAL(p_s_sb)->j_last ;
 2470   cn->bh = bh ;
 2471   if (SB_JOURNAL(p_s_sb)->j_last) {
 2472     SB_JOURNAL(p_s_sb)->j_last->next = cn ;
 2473     SB_JOURNAL(p_s_sb)->j_last = cn ;
 2474   } else {
 2475     SB_JOURNAL(p_s_sb)->j_first = cn ;
 2476     SB_JOURNAL(p_s_sb)->j_last = cn ;
 2477   }
 2478 done:
 2479   return 0 ;
 2480 }
 2481 
 2482 /*
 2483 ** if the buffer is already in the current transaction, do a journal_mark_dirty;
 2484 ** otherwise, just mark it dirty and move on.  Used for writes to meta blocks
 2485 ** that don't need journaling
 2486 */
 2487 int journal_mark_dirty_nolog(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, struct buffer_head *bh) {
 2488   if (reiserfs_dont_log(th->t_super) || buffer_journaled(bh) || 
 2489       buffer_journal_dirty(bh)) {
 2490     return journal_mark_dirty(th, p_s_sb, bh) ;
 2491   }
 2492   if (get_journal_hash_dev(SB_JOURNAL(p_s_sb)->j_list_hash_table, bh->b_dev,bh->b_blocknr,bh->b_size)) {
 2493     return journal_mark_dirty(th, p_s_sb, bh) ;
 2494   }
 2495   mark_buffer_dirty(bh) ;
 2496   return 0 ;
 2497 }
 2498 
 2499 int journal_end(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, unsigned long nblocks) {
 2500   return do_journal_end(th, p_s_sb, nblocks, 0) ;
 2501 }
 2502 
 2503 /* removes from the current transaction, brelse'ing and decrementing any counters.  
 2504 ** also files the removed buffer directly onto the clean list
 2505 **
 2506 ** called by journal_mark_freed when a block has been deleted
 2507 **
 2508 ** returns 1 if it cleaned and released the buffer. 0 otherwise
 2509 */
 2510 static int remove_from_transaction(struct super_block *p_s_sb, unsigned long blocknr, int already_cleaned) {
 2511   struct buffer_head *bh ;
 2512   struct reiserfs_journal_cnode *cn ;
 2513   int ret = 0;
 2514 
 2515   cn = get_journal_hash_dev(SB_JOURNAL(p_s_sb)->j_hash_table, p_s_sb->s_dev, blocknr, p_s_sb->s_blocksize) ;
 2516   if (!cn || !cn->bh) {
 2517     return ret ;
 2518   }
 2519   bh = cn->bh ;
 2520   if (cn->prev) {
 2521     cn->prev->next = cn->next ;
 2522   }
 2523   if (cn->next) {
 2524     cn->next->prev = cn->prev ;
 2525   }
 2526   if (cn == SB_JOURNAL(p_s_sb)->j_first) {
 2527     SB_JOURNAL(p_s_sb)->j_first = cn->next ;  
 2528   }
 2529   if (cn == SB_JOURNAL(p_s_sb)->j_last) {
 2530     SB_JOURNAL(p_s_sb)->j_last = cn->prev ;
 2531   }
 2532   remove_journal_hash(SB_JOURNAL(p_s_sb)->j_hash_table, NULL, bh, 0) ; 
 2533   mark_buffer_not_journaled(bh) ; /* don't log this one */
 2534 
 2535   if (!already_cleaned) {
 2536     mark_buffer_notjournal_dirty(bh) ; 
 2537     put_bh(bh) ;
 2538     if (atomic_read(&(bh->b_count)) < 0) {
 2539       reiserfs_warning(p_s_sb, "journal-1752: remove from trans, b_count < 0\n") ;
 2540     }
 2541     if (!buffer_locked(bh)) reiserfs_clean_and_file_buffer(bh) ; 
 2542     ret = 1 ;
 2543   }
 2544   SB_JOURNAL(p_s_sb)->j_len-- ;
 2545   SB_JOURNAL(p_s_sb)->j_len_alloc-- ;
 2546   free_cnode(p_s_sb, cn) ;
 2547   return ret ;
 2548 }
 2549 
 2550 /* removes from a specific journal list hash */
 2551 static int remove_from_journal_list(struct super_block *s, struct reiserfs_journal_list *jl, struct buffer_head *bh, int remove_freed) {
 2552   remove_journal_hash(SB_JOURNAL(s)->j_list_hash_table, jl, bh, remove_freed) ;
 2553   return 0 ;
 2554 }
 2555 
 2556 /*
 2557 ** for any cnode in a journal list, it can only be dirtied if all the
 2558 ** transactions that include it are committed to disk.
 2559 ** this checks through each transaction, and returns 1 if you are allowed to dirty,
 2560 ** and 0 if you aren't
 2561 **
 2562 ** it is called by dirty_journal_list, which is called after flush_commit_list has gotten all the log
 2563 ** blocks for a given transaction on disk
 2564 **
 2565 */
 2566 static int can_dirty(struct reiserfs_journal_cnode *cn) {
 2567   kdev_t dev = cn->dev ;
 2568   unsigned long blocknr = cn->blocknr  ;
 2569   struct reiserfs_journal_cnode *cur = cn->hprev ;
 2570   int can_dirty = 1 ;
 2571   
 2572   /* first test hprev.  These are all newer than cn, so any node here
 2573   ** with the same block number and dev means this node can't be sent
 2574   ** to disk right now.
 2575   */
 2576   while(cur && can_dirty) {
 2577     if (cur->jlist && cur->bh && cur->blocknr && cur->dev == dev && 
 2578         cur->blocknr == blocknr) {
 2579       can_dirty = 0 ;
 2580     }
 2581     cur = cur->hprev ;
 2582   }
 2583   /* then test hnext.  These are all older than cn.  As long as they
 2584   ** are committed to the log, it is safe to write cn to disk
 2585   */
 2586   cur = cn->hnext ;
 2587   while(cur && can_dirty) {
 2588     if (cur->jlist && cur->jlist->j_len > 0 && 
 2589         atomic_read(&(cur->jlist->j_commit_left)) > 0 && cur->bh && 
 2590         cur->blocknr && cur->dev == dev && cur->blocknr == blocknr) {
 2591       can_dirty = 0 ;
 2592     }
 2593     cur = cur->hnext ;
 2594   }
 2595   return can_dirty ;
 2596 }
 2597 
 2598 /* syncs the commit blocks, but does not force the real buffers to disk
 2599 ** will wait until the current transaction is done/committed before returning 
 2600 */
 2601 int journal_end_sync(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, unsigned long nblocks) {
 2602 
 2603   if (SB_JOURNAL(p_s_sb)->j_len == 0) {
 2604     reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb), 1) ;
 2605     journal_mark_dirty(th, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb)) ;
 2606   }
 2607   return do_journal_end(th, p_s_sb, nblocks, COMMIT_NOW | WAIT) ;
 2608 }
 2609 
 2610 int show_reiserfs_locks(void) {
 2611 
 2612   dump_journal_writers() ;
 2613   return 0 ;
 2614 }
 2615 
 2616 /*
 2617 ** used to get memory back from async commits that are floating around
 2618 ** and to reclaim any blocks deleted but unusable because their commits
 2619 ** haven't hit disk yet.  Called from bitmap.c.
 2620 **
 2621 ** if it starts flushing things, it ORs SCHEDULE_OCCURRED into repeat.
 2622 ** note, this is just if schedule has a chance of occurring.  I need to
 2623 ** change flush_commit_list to have a repeat parameter too.
 2624 **
 2625 */
 2626 void flush_async_commits(struct super_block *p_s_sb) {
 2627   int i ;
 2628 
 2629   for (i = 0 ; i < JOURNAL_LIST_COUNT ; i++) {
 2630     if (i != SB_JOURNAL_LIST_INDEX(p_s_sb)) {
 2631       flush_commit_list(p_s_sb, SB_JOURNAL_LIST(p_s_sb) + i, 1) ; 
 2632     }
 2633   }
 2634 }
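      
      /*
      ** Editorial note: SB_JOURNAL_LIST() is an array of JOURNAL_LIST_COUNT
      ** journal lists used as a ring, and SB_JOURNAL_LIST_INDEX() names the
      ** list the current transaction is filling.  That is why the loop above
      ** skips the current index, and why flush_old_commits below starts its
      ** scan at (index + 1) % JOURNAL_LIST_COUNT, the oldest list.
      */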
 2635 
 2636 /*
 2637 ** flushes any old transactions to disk
 2638 ** ends the current transaction if it is too old
 2639 **
 2640 ** also calls flush_journal_list with old_only == 1, which allows me to reclaim
 2641 ** memory and such from the journal lists whose real blocks are all on disk.
 2642 **
 2643 ** called by sync_dev_journal from buffer.c
 2644 */
 2645 int flush_old_commits(struct super_block *p_s_sb, int immediate) {
 2646   int i ;
 2647   int count = 0;
 2648   int start ; 
 2649   time_t now ; 
 2650   struct reiserfs_transaction_handle th ; 
 2651 
 2652   start = SB_JOURNAL_LIST_INDEX(p_s_sb) ;
 2653   now = CURRENT_TIME ;
 2654 
 2655   /* safety check so we don't flush while we are replaying the log during mount */
 2656   if (SB_JOURNAL_LIST_INDEX(p_s_sb) < 0) {
 2657     return 0  ;
 2658   }
 2659   /* starting with oldest, loop until we get to the start */
 2660   i = (SB_JOURNAL_LIST_INDEX(p_s_sb) + 1) % JOURNAL_LIST_COUNT ;
 2661   while(i != start) {
 2662     if (SB_JOURNAL_LIST(p_s_sb)[i].j_len > 0 && ((now - SB_JOURNAL_LIST(p_s_sb)[i].j_timestamp) > SB_JOURNAL_MAX_COMMIT_AGE(p_s_sb) ||
 2663        immediate)) {
 2664       /* we have to check again to be sure the current transaction did not change */
 2665       if (i != SB_JOURNAL_LIST_INDEX(p_s_sb))  {
 2666         flush_commit_list(p_s_sb, SB_JOURNAL_LIST(p_s_sb) + i, 1) ;
 2667       }
 2668     }
 2669     i = (i + 1) % JOURNAL_LIST_COUNT ;
 2670     count++ ;
 2671   }
 2672   /* now, check the current transaction.  If there are no writers, and it is too old, finish it, and
 2673   ** force the commit blocks to disk
 2674   */
 2675   if (!immediate && atomic_read(&(SB_JOURNAL(p_s_sb)->j_wcount)) <= 0 &&  
 2676      SB_JOURNAL(p_s_sb)->j_trans_start_time > 0 && 
 2677      SB_JOURNAL(p_s_sb)->j_len > 0 && 
 2678      (now - SB_JOURNAL(p_s_sb)->j_trans_start_time) > SB_JOURNAL_MAX_TRANS_AGE(p_s_sb)) {
 2679     journal_join(&th, p_s_sb, 1) ;
 2680     reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb), 1) ;
 2681     journal_mark_dirty(&th, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb)) ;
 2682     do_journal_end(&th, p_s_sb, 1, COMMIT_NOW) ;
 2683   } else if (immediate) { /* belongs above, but I wanted this to be very explicit as a special case.  If they say to 
 2684                              flush, we must be sure old transactions hit the disk too. */
 2685     journal_join(&th, p_s_sb, 1) ;
 2686     reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb), 1) ;
 2687     journal_mark_dirty(&th, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb)) ;
 2688     do_journal_end(&th, p_s_sb, 1, COMMIT_NOW | WAIT) ;
 2689   }
 2690   reiserfs_journal_kupdate(p_s_sb) ;
 2691   return 0 ;
 2692 }
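      
      /*
      ** Illustrative sketch (editorial addition; the buffer.c call site named
      ** in the header comment is assumed, not shown): the periodic sync path
      ** asks for a non-immediate flush, an explicit sync for an immediate one.
      */
      #if 0
        flush_old_commits(sb, 0) ; /* kupdate-style: only flush what is old */
        flush_old_commits(sb, 1) ; /* sync: end the current trans and wait */
      #endif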
 2693 
 2694 /*
 2695 ** returns 0 if do_journal_end should return right away, returns 1 if do_journal_end should finish the commit
 2696 ** 
 2697 ** if the current transaction is too old, but still has writers, this will wait on j_join_wait until all 
 2698 ** the writers are done.  By the time it wakes up, the transaction it was called for has already ended, so it just
 2699 ** flushes the commit list and returns 0.
 2700 **
 2701 ** Won't batch when flush or commit_now is set.  Also won't batch when others are waiting on j_join_wait.
 2702 ** 
 2703 ** Note, we can't allow the journal_end to proceed while there are still writers in the log.
 2704 */
 2705 static int check_journal_end(struct reiserfs_transaction_handle *th, struct super_block  * p_s_sb, 
 2706                              unsigned long nblocks, int flags) {
 2707 
 2708   time_t now ;
 2709   int flush = flags & FLUSH_ALL ;
 2710   int commit_now = flags & COMMIT_NOW ;
 2711   int wait_on_commit = flags & WAIT ;
 2712 
 2713   if (th->t_trans_id != SB_JOURNAL(p_s_sb)->j_trans_id) {
 2714     reiserfs_panic(th->t_super, "journal-1577: handle trans id %ld != current trans id %ld\n", 
 2715                    th->t_trans_id, SB_JOURNAL(p_s_sb)->j_trans_id);
 2716   }
 2717 
 2718   SB_JOURNAL(p_s_sb)->j_len_alloc -= (th->t_blocks_allocated - th->t_blocks_logged) ;
 2719   if (atomic_read(&(SB_JOURNAL(p_s_sb)->j_wcount)) > 0) { /* <= 0 is allowed.  unmounting might not call begin */
 2720     atomic_dec(&(SB_JOURNAL(p_s_sb)->j_wcount)) ;
 2721   }
 2722 
 2723   /* BUG: deal with the case where j_len is 0, but blocks that people previously freed
 2724   ** still need to be released.  This will be dealt with by the next transaction that
 2725   ** actually writes something, but should be taken care of in this trans.
 2726   */
 2727   if (SB_JOURNAL(p_s_sb)->j_len == 0) {
 2728     int wcount = atomic_read(&(SB_JOURNAL(p_s_sb)->j_wcount)) ;
 2729     unlock_journal(p_s_sb) ;
 2730     if (atomic_read(&(SB_JOURNAL(p_s_sb)->j_jlock))  > 0 && wcount <= 0) {
 2731       atomic_dec(&(SB_JOURNAL(p_s_sb)->j_jlock)) ;
 2732       wake_up(&(SB_JOURNAL(p_s_sb)->j_join_wait)) ;
 2733     }
 2734     return 0 ;
 2735   }
 2736   /* if wcount > 0, and we are called with flush or commit_now,
 2737   ** we wait on j_join_wait.  We will wake up when the last writer has
 2738   ** finished the transaction, and started it on its way to the disk.
 2739   ** Then, we flush the commit or journal list, and just return 0 
 2740   ** because the rest of journal end was already done for this transaction.
 2741   */
 2742   if (atomic_read(&(SB_JOURNAL(p_s_sb)->j_wcount)) > 0) {
 2743     if (flush || commit_now) {
 2744       int orig_jindex = SB_JOURNAL_LIST_INDEX(p_s_sb) ;
 2745       atomic_set(&(SB_JOURNAL(p_s_sb)->j_jlock), 1) ;
 2746       if (flush) {
 2747         SB_JOURNAL(p_s_sb)->j_next_full_flush = 1 ;
 2748       }
 2749       unlock_journal(p_s_sb) ;
 2750       /* sleep while the current transaction is still j_jlocked */
 2751       while(atomic_read(&(SB_JOURNAL(p_s_sb)->j_jlock)) && 
 2752             SB_JOURNAL(p_s_sb)->j_trans_id == th->t_trans_id) {
 2753         sleep_on(&(SB_JOURNAL(p_s_sb)->j_join_wait)) ;
 2754       }
 2755       if (commit_now) {
 2756         if (wait_on_commit) {
 2757           flush_commit_list(p_s_sb,  SB_JOURNAL_LIST(p_s_sb) + orig_jindex, 1) ;
 2758         } else {
 2759           commit_flush_async(p_s_sb, orig_jindex) ; 
 2760         }
 2761       }
 2762       return 0 ;
 2763     } 
 2764     unlock_journal(p_s_sb) ;
 2765     return 0 ;
 2766   }
 2767 
 2768   /* deal with old transactions where we are the last writers */
 2769   now = CURRENT_TIME ;
 2770   if ((now - SB_JOURNAL(p_s_sb)->j_trans_start_time) > SB_JOURNAL_MAX_TRANS_AGE(p_s_sb)) {
 2771     commit_now = 1 ;
 2772     SB_JOURNAL(p_s_sb)->j_next_async_flush = 1 ;
 2773   }
 2774   /* don't batch when someone is waiting on j_join_wait */
 2775   /* don't batch when syncing the commit or flushing the whole trans */
 2776   if (!(SB_JOURNAL(p_s_sb)->j_must_wait > 0) && !(atomic_read(&(SB_JOURNAL(p_s_sb)->j_jlock))) && !flush && !commit_now && 
 2777       (SB_JOURNAL(p_s_sb)->j_len < SB_JOURNAL_MAX_BATCH(p_s_sb))  && 
 2778       SB_JOURNAL(p_s_sb)->j_len_alloc < SB_JOURNAL_MAX_BATCH(p_s_sb) && SB_JOURNAL(p_s_sb)->j_cnode_free > (SB_JOURNAL_TRANS_MAX(p_s_sb) * 3)) {
 2779     SB_JOURNAL(p_s_sb)->j_bcount++ ;
 2780     unlock_journal(p_s_sb) ;
 2781     return 0 ;
 2782   }
 2783 
 2784   if (SB_JOURNAL(p_s_sb)->j_start > SB_ONDISK_JOURNAL_SIZE(p_s_sb)) {
 2785     reiserfs_panic(p_s_sb, "journal-003: journal_end: j_start (%ld) is too high\n", SB_JOURNAL(p_s_sb)->j_start) ;
 2786   }
 2787   return 1 ;
 2788 }
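      
      /*
      ** Editorial summary of check_journal_end (mirrors the code above):
      ** it returns 0, with the journal unlocked, when the transaction is
      ** already empty, when other writers are still active (after waiting
      ** and flushing on their behalf if flush/commit_now was set), or when
      ** the transaction can simply be batched.  It returns 1, leaving the
      ** journal locked, when we are the last writer and do_journal_end
      ** must finish the commit now.
      */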
 2789 
 2790 /*
 2791 ** Does all the work that makes deleting blocks safe.
 2792 ** when deleting a block marked BH_JNew, just remove it from the current transaction, clean its buffer_head and move on.
 2793 ** 
 2794 ** otherwise:
 2795 ** set a bit for the block in the journal bitmap.  That will prevent it from being allocated for unformatted nodes
 2796 ** before this transaction has finished.
 2797 **
 2798 ** mark any cnodes for this block as BLOCK_FREED, and clear their bh pointers.  That will prevent any old transactions with
 2799 ** this block from trying to flush to the real location.  Since we aren't removing the cnode from the journal_list_hash,
 2800 ** the block can't be reallocated yet.
 2801 **
 2802 ** Then remove it from the current transaction, decrementing any counters and filing it on the clean list.
 2803 */
 2804 int journal_mark_freed(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, unsigned long blocknr) {
 2805   struct reiserfs_journal_cnode *cn = NULL ;
 2806   struct buffer_head *bh = NULL ;
 2807   struct reiserfs_list_bitmap *jb = NULL ;
 2808   int cleaned = 0 ;
 2809   
 2810   if (reiserfs_dont_log(th->t_super)) {
 2811     bh = sb_get_hash_table(p_s_sb, blocknr) ;
 2812     if (bh && buffer_dirty (bh)) {
 2813       reiserfs_warning (p_s_sb, "journal_mark_freed(dont_log): dirty buffer on hash list: %lx %ld\n", bh->b_state, blocknr);
 2814       BUG ();
 2815     }
 2816     brelse (bh);
 2817     return 0 ;
 2818   }
 2819   bh = sb_get_hash_table(p_s_sb, blocknr) ;
 2820   /* if it is journal new, we just remove it from this transaction */
 2821   if (bh && buffer_journal_new(bh)) {
 2822     mark_buffer_notjournal_new(bh) ;
 2823     clear_prepared_bits(bh) ;
 2824     cleaned = remove_from_transaction(p_s_sb, blocknr, cleaned) ;
 2825   } else {
 2826     /* set the bit for this block in the journal bitmap for this transaction */
 2827     jb = SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_list_bitmap ;
 2828     if (!jb) {
 2829       reiserfs_panic(p_s_sb, "journal-1702: journal_mark_freed, journal_list_bitmap is NULL\n") ;
 2830     }
 2831     set_bit_in_list_bitmap(p_s_sb, blocknr, jb) ;
 2832 
 2833     /* Note, the entire while loop is not allowed to schedule.  */
 2834 
 2835     if (bh) {
 2836       clear_prepared_bits(bh) ;
 2837     }
 2838     cleaned = remove_from_transaction(p_s_sb, blocknr, cleaned) ;
 2839 
 2840     /* find all older transactions with this block, make sure they don't try to write it out */
 2841     cn = get_journal_hash_dev(SB_JOURNAL(p_s_sb)->j_list_hash_table, p_s_sb->s_dev, blocknr, p_s_sb->s_blocksize) ;
 2842     while (cn) {
 2843       if (p_s_sb->s_dev == cn->dev && blocknr == cn->blocknr) {
 2844         set_bit(BLOCK_FREED, &cn->state) ;
 2845         if (cn->bh) {
 2846           if (!cleaned) {
 2847             /* remove_from_transaction will brelse the buffer if it was 
 2848             ** in the current trans
 2849             */
 2850             mark_buffer_notjournal_dirty(cn->bh) ;
 2851             cleaned = 1 ;
 2852             put_bh(cn->bh) ;
 2853             if (atomic_read(&(cn->bh->b_count)) < 0) {
 2854               reiserfs_warning(p_s_sb, "journal-2138: cn->bh->b_count < 0\n") ;
 2855             }
 2856           }
 2857           if (cn->jlist) { /* since we are clearing the bh, we MUST dec nonzerolen */
 2858             atomic_dec(&(cn->jlist->j_nonzerolen)) ;
 2859           }
 2860           cn->bh = NULL ; 
 2861         } 
 2862       }
 2863       cn = cn->hnext ;
 2864     }
 2865   }
 2866 
 2867   if (bh) {
 2868     reiserfs_clean_and_file_buffer(bh) ;
 2869     put_bh(bh) ; /* get_hash grabs the buffer */
 2870     if (atomic_read(&(bh->b_count)) < 0) {
 2871       reiserfs_warning(p_s_sb, "journal-2165: bh->b_count < 0\n") ;
 2872     }
 2873   }
 2874   return 0 ;
 2875 }
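      
      /*
      ** Illustrative sketch (editorial addition; example_free_block is
      ** hypothetical): a block is freed under an open transaction, so the
      ** list-bitmap bit set by journal_mark_freed keeps it from being
      ** reallocated before the transaction is safely on disk.
      */
      #if 0
      static void example_free_block(struct reiserfs_transaction_handle *th,
                                     struct super_block *s, unsigned long blocknr) {
        journal_mark_freed(th, s, blocknr) ;
        /* the caller then clears the real bitmap bit in this same trans */
      }
      #endif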
 2876 
 2877 void reiserfs_update_inode_transaction(struct inode *inode) {
 2878   
 2879   inode->u.reiserfs_i.i_trans_index = SB_JOURNAL_LIST_INDEX(inode->i_sb);
 2880 
 2881   inode->u.reiserfs_i.i_trans_id = SB_JOURNAL(inode->i_sb)->j_trans_id ;
 2882 }
 2883 
 2884 void reiserfs_update_tail_transaction(struct inode *inode) {
 2885   
 2886   inode->u.reiserfs_i.i_tail_trans_index = SB_JOURNAL_LIST_INDEX(inode->i_sb);
 2887 
 2888   inode->u.reiserfs_i.i_tail_trans_id = SB_JOURNAL(inode->i_sb)->j_trans_id ;
 2889 }
 2890 
 2891 static void __commit_trans_index(struct inode *inode, unsigned long id,
 2892                                  unsigned long index) 
 2893 {
 2894     struct reiserfs_journal_list *jl ;
 2895     struct reiserfs_transaction_handle th ;
 2896     struct super_block *sb = inode->i_sb ;
 2897 
 2898     jl = SB_JOURNAL_LIST(sb) + index;
 2899 
 2900     /* is it from the current transaction, or from an unknown transaction? */
 2901     if (id == SB_JOURNAL(sb)->j_trans_id) {
 2902         journal_join(&th, sb, 1) ;
 2903         journal_end_sync(&th, sb, 1) ;
 2904     } else if (jl->j_trans_id == id) {
 2905         flush_commit_list(sb, jl, 1) ;
 2906     }
 2907     /* if the transaction id does not match, this list is long since flushed
 2908     ** and we don't have to do anything here
 2909     */
 2910 }
 2911 void reiserfs_commit_for_tail(struct inode *inode) {
 2912     unsigned long id = inode->u.reiserfs_i.i_tail_trans_id;
 2913     unsigned long index = inode->u.reiserfs_i.i_tail_trans_index;
 2914 
 2915     /* for tails, if this info is unset there's nothing to commit */
 2916     if (id && index)
 2917         __commit_trans_index(inode, id, index);
 2918 }
 2919 void reiserfs_commit_for_inode(struct inode *inode) {
 2920     unsigned long id = inode->u.reiserfs_i.i_trans_id;
 2921     unsigned long index = inode->u.reiserfs_i.i_trans_index;
 2922 
 2923     /* for the whole inode, assume unset id or index means it was
 2924      * changed in the current transaction.  More conservative
 2925      */
 2926     if (!id || !index)
 2927         reiserfs_update_inode_transaction(inode) ;
 2928 
 2929     __commit_trans_index(inode, id, index);
 2930 }
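      
      /*
      ** Illustrative pairing (editorial addition): record the transaction
      ** when the inode or tail is changed, then force just that commit at
      ** sync time instead of flushing the whole journal.
      */
      #if 0
        reiserfs_update_inode_transaction(inode) ; /* at modification time */
        /* ... later, in an fsync-style path ... */
        reiserfs_commit_for_inode(inode) ;
      #endif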
 2931 
 2932 void reiserfs_restore_prepared_buffer(struct super_block *p_s_sb, 
 2933                                       struct buffer_head *bh) {
 2934   PROC_INFO_INC( p_s_sb, journal.restore_prepared );
 2935   if (reiserfs_dont_log (p_s_sb))
 2936     return;
 2937 
 2938   if (!bh) {
 2939     return ;
 2940   }
 2941   clear_bit(BH_JPrepared, &bh->b_state) ;
 2942 }
 2943 
 2944 extern struct tree_balance *cur_tb ;
 2945 /*
 2946 ** before we can change a metadata block, we have to make sure it won't
 2947 ** be written to disk while we are altering it.  So, we must:
 2948 ** clean it
 2949 ** wait on it.
 2950 ** 
 2951 */
 2952 void reiserfs_prepare_for_journal(struct super_block *p_s_sb, 
 2953                                   struct buffer_head *bh, int wait) {
 2954   int retry_count = 0 ;
 2955 
 2956   PROC_INFO_INC( p_s_sb, journal.prepare );
 2957   if (reiserfs_dont_log (p_s_sb))
 2958     return;
 2959 
 2960   while(!test_bit(BH_JPrepared, &bh->b_state) ||
 2961         (wait && buffer_locked(bh))) {
 2962     if (buffer_journaled(bh)) {
 2963       set_bit(BH_JPrepared, &bh->b_state) ;
 2964       return ;
 2965     }
 2966     set_bit(BH_JPrepared, &bh->b_state) ;
 2967     if (wait) {
 2968       RFALSE( buffer_locked(bh) && cur_tb != NULL,
 2969               "waiting while do_balance was running\n") ;
 2970       wait_on_buffer(bh) ;
 2971     }
 2972     PROC_INFO_INC( p_s_sb, journal.prepare_retry );
 2973     retry_count++ ;
 2974   }
 2975 }
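      
      /*
      ** Illustrative sketch (editorial addition; still_want_to_log is a
      ** hypothetical predicate): every prepared buffer is either logged
      ** with journal_mark_dirty or released again with
      ** reiserfs_restore_prepared_buffer, which clears BH_JPrepared.
      */
      #if 0
        reiserfs_prepare_for_journal(s, bh, 1) ;
        if (still_want_to_log(bh)) {
          journal_mark_dirty(&th, s, bh) ;
        } else {
          reiserfs_restore_prepared_buffer(s, bh) ;
        }
      #endif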
 2976 
 2977 /* 
 2978 ** long and ugly.  If flush, will not return until all commit
 2979 ** blocks and all real buffers in the trans are on disk.
 2980 ** If no_async, won't return until all commit blocks are on disk.
 2981 **
 2982 ** keep reading, there are comments as you go along
 2983 */
 2984 static int do_journal_end(struct reiserfs_transaction_handle *th, struct super_block  * p_s_sb, unsigned long nblocks, 
 2985                           int flags) {
 2986   struct reiserfs_journal_cnode *cn, *next, *jl_cn; 
 2987   struct reiserfs_journal_cnode *last_cn = NULL;
 2988   struct reiserfs_journal_desc *desc ; 
 2989   struct reiserfs_journal_commit *commit ; 
 2990   struct buffer_head *c_bh ; /* commit bh */
 2991   struct buffer_head *d_bh ; /* desc bh */
 2992   int cur_write_start = 0 ; /* start index of current log write */
 2993   int cur_blocks_left = 0 ; /* number of journal blocks left to write */
 2994   int old_start ;
 2995   int i ;
 2996   int jindex ;
 2997   int orig_jindex ;
 2998   int flush = flags & FLUSH_ALL ;
 2999   int commit_now = flags & COMMIT_NOW ;
 3000   int wait_on_commit = flags & WAIT ;
 3001   struct reiserfs_super_block *rs ; 
 3002 
 3003   if (reiserfs_dont_log(th->t_super)) {
 3004     return 0 ;
 3005   }
 3006 
 3007   lock_journal(p_s_sb) ;
 3008   if (SB_JOURNAL(p_s_sb)->j_next_full_flush) {
 3009     flags |= FLUSH_ALL ;
 3010     flush = 1 ;
 3011   }
 3012   if (SB_JOURNAL(p_s_sb)->j_next_async_flush) {
 3013     flags |= COMMIT_NOW ;
 3014     commit_now = 1 ;
 3015   }
 3016 
 3017   /* check_journal_end unlocks the journal if it does not return 1;
 3018   ** it tells us if we should continue with the journal_end, or just return
 3019   */
 3020   if (!check_journal_end(th, p_s_sb, nblocks, flags)) {
 3021     return 0 ;
 3022   }
 3023 
 3024   /* check_journal_end might set these, check again */
 3025   if (SB_JOURNAL(p_s_sb)->j_next_full_flush) {
 3026     flush = 1 ;
 3027   }
 3028   if (SB_JOURNAL(p_s_sb)->j_next_async_flush) {
 3029     commit_now = 1 ;
 3030   }
 3031   /*
 3032   ** j_must_wait means we have to flush the log blocks, and the real blocks for
 3033   ** this transaction
 3034   */
 3035   if (SB_JOURNAL(p_s_sb)->j_must_wait > 0) {
 3036     flush = 1 ;
 3037   }
 3038 
 3039 #ifdef REISERFS_PREALLOCATE
 3040   reiserfs_discard_all_prealloc(th); /* it should not bring new blocks
 3041                                       * into the transaction */
 3042 #endif
 3043   
 3044   rs = SB_DISK_SUPER_BLOCK(p_s_sb) ;
 3045   /* setup description block */
 3046   d_bh = journal_getblk(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + SB_JOURNAL(p_s_sb)->j_start) ; 
 3047   mark_buffer_uptodate(d_bh, 1) ;
 3048   desc = (struct reiserfs_journal_desc *)(d_bh)->b_data ;
 3049   memset(desc, 0, sizeof(struct reiserfs_journal_desc)) ;
 3050   memcpy(desc->j_magic, JOURNAL_DESC_MAGIC, 8) ;
 3051   desc->j_trans_id = cpu_to_le32(SB_JOURNAL(p_s_sb)->j_trans_id) ;
 3052 
 3053   /* set up the commit block.  Keep it clean too; don't write it until after everything else is written */
 3054   c_bh =  journal_getblk(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + 
 3055                  ((SB_JOURNAL(p_s_sb)->j_start + SB_JOURNAL(p_s_sb)->j_len + 1) % SB_ONDISK_JOURNAL_SIZE(p_s_sb))) ;
 3056   commit = (struct reiserfs_journal_commit *)c_bh->b_data ;
 3057   memset(commit, 0, sizeof(struct reiserfs_journal_commit)) ;
 3058   commit->j_trans_id = cpu_to_le32(SB_JOURNAL(p_s_sb)->j_trans_id) ;
 3059   mark_buffer_uptodate(c_bh, 1) ;
 3060 
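      /*
      ** Editorial note -- on-disk log layout this function writes for one
      ** transaction (offsets wrap mod SB_ONDISK_JOURNAL_SIZE, relative to
      ** SB_ONDISK_JOURNAL_1st_BLOCK):
      **   j_start                   desc block: magic, trans_id, len, and
      **                             the first half of the real-block index
      **   j_start + 1 .. + j_len    copies of the real blocks, in order
      **   j_start + j_len + 1       commit block: trans_id, len, and the
      **                             rest of the real-block index
      */
      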
 3061   /* init this journal list */
 3062   atomic_set(&(SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_older_commits_done), 0) ;
 3063   SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_trans_id = SB_JOURNAL(p_s_sb)->j_trans_id ;
 3064   SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_timestamp = SB_JOURNAL(p_s_sb)->j_trans_start_time ;
 3065   SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_commit_bh = c_bh ;
 3066   SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_start = SB_JOURNAL(p_s_sb)->j_start ;
 3067   SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_len = SB_JOURNAL(p_s_sb)->j_len ;  
 3068   atomic_set(&(SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_nonzerolen), SB_JOURNAL(p_s_sb)->j_len) ;
 3069   atomic_set(&(SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_commit_left), SB_JOURNAL(p_s_sb)->j_len + 2);
 3070   SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_realblock = NULL ;
 3071   atomic_set(&(SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_commit_flushing), 1) ;
 3072   atomic_set(&(SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_flushing), 1) ;
 3073 
 3074   /* which is faster, locking/unlocking at the start and end of the for,
 3075   ** or locking once per iteration around the insert_journal_hash?
 3076   ** either way, we are write locking insert_journal_hash.  The ENTIRE FOR
 3077   ** LOOP MUST NOT cause schedule to occur.
 3078   */
 3079 
 3080   /* for each real block, add it to the journal list hash,
 3081   ** copy into real block index array in the commit or desc block
 3082   */
 3083   for (i = 0, cn = SB_JOURNAL(p_s_sb)->j_first ; cn ; cn = cn->next, i++) {
 3084     if (test_bit(BH_JDirty, &cn->bh->b_state) ) {
 3085       jl_cn = get_cnode(p_s_sb) ;
 3086       if (!jl_cn) {
 3087         reiserfs_panic(p_s_sb, "journal-1676, get_cnode returned NULL\n") ;
 3088       }
 3089       if (i == 0) {
 3090         SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_realblock = jl_cn ;
 3091       }
 3092       jl_cn->prev = last_cn ;
 3093       jl_cn->next = NULL ;
 3094       if (last_cn) {
 3095         last_cn->next = jl_cn ;
 3096       }
 3097       last_cn = jl_cn ;
 3098       /* make sure the block we are trying to log is not a block
 3099          of the journal or reserved area */
 3100 
 3101       if (is_block_in_log_or_reserved_area(p_s_sb, cn->bh->b_blocknr)) {
 3102         reiserfs_panic(p_s_sb, "journal-2332: Trying to log block %lu, which is a log block\n", cn->bh->b_blocknr) ;
 3103       }
 3104       jl_cn->blocknr = cn->bh->b_blocknr ; 
 3105       jl_cn->state = 0 ;
 3106       jl_cn->dev = cn->bh->b_dev ; 
 3107       jl_cn->bh = cn->bh ;
 3108       jl_cn->jlist = SB_JOURNAL_LIST(p_s_sb) + SB_JOURNAL_LIST_INDEX(p_s_sb) ;
 3109       insert_journal_hash(SB_JOURNAL(p_s_sb)->j_list_hash_table, jl_cn) ; 
 3110       if (i < JOURNAL_TRANS_HALF) {
 3111         desc->j_realblock[i] = cpu_to_le32(cn->bh->b_blocknr) ;
 3112       } else {
 3113         commit->j_realblock[i - JOURNAL_TRANS_HALF] = cpu_to_le32(cn->bh->b_blocknr) ;
 3114       }
 3115     } else {
 3116       i-- ; /* not JDirty, so it doesn't consume a realblock index slot */
 3117     }
 3118   }
 3119 
 3120   desc->j_len = cpu_to_le32(SB_JOURNAL(p_s_sb)->j_len)  ;
 3121   desc->j_mount_id = cpu_to_le32(SB_JOURNAL(p_s_sb)->j_mount_id) ;
 3122   desc->j_trans_id = cpu_to_le32(SB_JOURNAL(p_s_sb)->j_trans_id) ;
 3123   commit->j_len = cpu_to_le32(SB_JOURNAL(p_s_sb)->j_len) ;
 3124 
 3125   /* special check in case all buffers in the journal were marked for not logging */
 3126   if (SB_JOURNAL(p_s_sb)->j_len == 0) {
 3127     brelse(d_bh) ;
 3128     brelse(c_bh) ;
 3129     unlock_journal(p_s_sb) ;
 3130     reiserfs_warning(p_s_sb, "journal-2020: do_journal_end: BAD desc->j_len is ZERO\n") ;
 3131     atomic_set(&(SB_JOURNAL(p_s_sb)->j_jlock), 0) ;
 3132     wake_up(&(SB_JOURNAL(p_s_sb)->j_join_wait)) ;
 3133     return 0 ;
 3134   }
 3135 
 3136   /* first data block is j_start + 1, so add one to cur_write_start wherever you use it */
 3137   cur_write_start = SB_JOURNAL(p_s_sb)->j_start ;
 3138   cur_blocks_left = SB_JOURNAL(p_s_sb)->j_len  ;
 3139   cn = SB_JOURNAL(p_s_sb)->j_first ;
 3140   jindex = 1 ; /* start at one so we don't get the desc again */
 3141   while(cur_blocks_left > 0) {
 3142     /* copy all the real blocks into log area.  dirty log blocks */
 3143     if (test_bit(BH_JDirty, &cn->bh->b_state)) {
 3144       struct buffer_head *tmp_bh ;
 3145       tmp_bh =  journal_getblk(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + 
 3146                        ((cur_write_start + jindex) % SB_ONDISK_JOURNAL_SIZE(p_s_sb))) ;
 3147       mark_buffer_uptodate(tmp_bh, 1) ;
 3148       memcpy(tmp_bh->b_data, cn->bh->b_data, cn->bh->b_size) ;  
 3149       jindex++ ;
 3150     } else {
 3151       /* JDirty cleared sometime during transaction.  don't log this one */
 3152       reiserfs_warning(p_s_sb, "journal-2048: do_journal_end: BAD, buffer in journal hash, but not JDirty!\n") ;
 3153     }
 3154     cn = cn->next ;
 3155     cur_blocks_left-- ;
 3156   }
 3157 
 3158   /* we are done with both the c_bh and d_bh, but
 3159   ** c_bh must be written after all other commit blocks,
 3160   ** so we dirty/relse c_bh in flush_commit_list, with commit_left <= 1.
 3161   */
 3162 
 3163   /* now loop through and mark all buffers from this transaction as JDirty_wait
 3164   ** clear the JDirty bit, clear BH_JNew too.  
 3165   ** if they weren't JDirty, they weren't logged, just relse them and move on
 3166   */
 3167   cn = SB_JOURNAL(p_s_sb)->j_first ; 
 3168   while(cn) {
 3169     clear_bit(BH_JNew, &(cn->bh->b_state)) ;
 3170     if (test_bit(BH_JDirty, &(cn->bh->b_state))) {
 3171       set_bit(BH_JDirty_wait, &(cn->bh->b_state)) ; 
 3172       clear_bit(BH_JDirty, &(cn->bh->b_state)) ;
 3173     } else {
 3174       brelse(cn->bh) ;
 3175     }
 3176     next = cn->next ;
 3177     free_cnode(p_s_sb, cn) ;
 3178     cn = next ;
 3179   }
 3180 
 3181   /* unlock the journal list for committing and flushing */
 3182   atomic_set(&(SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_commit_flushing), 0) ;
 3183   atomic_set(&(SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_flushing), 0) ;
 3184 
 3185   orig_jindex = SB_JOURNAL_LIST_INDEX(p_s_sb) ;
 3186   jindex = (SB_JOURNAL_LIST_INDEX(p_s_sb) + 1) % JOURNAL_LIST_COUNT ; 
 3187   SB_JOURNAL_LIST_INDEX(p_s_sb) = jindex ;
 3188 
 3189   /* write any buffers that must hit disk before this commit is done */
 3190   fsync_buffers_list(&(SB_JOURNAL(p_s_sb)->j_dirty_buffers)) ;
 3191 
 3192   /* honor the flush and async wishes from the caller */
 3193   if (flush) {
 3194   
 3195     flush_commit_list(p_s_sb, SB_JOURNAL_LIST(p_s_sb) + orig_jindex, 1) ;
 3196     flush_journal_list(p_s_sb,  SB_JOURNAL_LIST(p_s_sb) + orig_jindex , 1) ;  
 3197   } else if (commit_now) {
 3198     if (wait_on_commit) {
 3199       flush_commit_list(p_s_sb, SB_JOURNAL_LIST(p_s_sb) + orig_jindex, 1) ;
 3200     } else {
 3201       commit_flush_async(p_s_sb, orig_jindex) ; 
 3202     }
 3203   }
 3204 
 3205   /* reset journal values for the next transaction */
 3206   old_start = SB_JOURNAL(p_s_sb)->j_start ;
 3207   SB_JOURNAL(p_s_sb)->j_start = (SB_JOURNAL(p_s_sb)->j_start + SB_JOURNAL(p_s_sb)->j_len + 2) % SB_ONDISK_JOURNAL_SIZE(p_s_sb);
 3208   atomic_set(&(SB_JOURNAL(p_s_sb)->j_wcount), 0) ;
 3209   SB_JOURNAL(p_s_sb)->j_bcount = 0 ;
 3210   SB_JOURNAL(p_s_sb)->j_last = NULL ;
 3211   SB_JOURNAL(p_s_sb)->j_first = NULL ;
 3212   SB_JOURNAL(p_s_sb)->j_len = 0 ;
 3213   SB_JOURNAL(p_s_sb)->j_trans_start_time = 0 ;
 3214   SB_JOURNAL(p_s_sb)->j_trans_id++ ;
 3215   SB_JOURNAL(p_s_sb)->j_must_wait = 0 ;
 3216   SB_JOURNAL(p_s_sb)->j_len_alloc = 0 ;
 3217   SB_JOURNAL(p_s_sb)->j_next_full_flush = 0 ;
 3218   SB_JOURNAL(p_s_sb)->j_next_async_flush = 0 ;
 3219   init_journal_hash(p_s_sb) ; 
 3220 
 3221   /* if the next transaction has any chance of wrapping, flush 
 3222   ** transactions that might get overwritten.  If any journal lists are very 
 3223   ** old, flush them as well.
 3224   */
 3225   for (i = 0 ; i < JOURNAL_LIST_COUNT ; i++) {
 3226     jindex = i ;
 3227     if (SB_JOURNAL_LIST(p_s_sb)[jindex].j_len > 0 && SB_JOURNAL(p_s_sb)->j_start <= SB_JOURNAL_LIST(p_s_sb)[jindex].j_start) {
 3228       if ((SB_JOURNAL(p_s_sb)->j_start + SB_JOURNAL_TRANS_MAX(p_s_sb) + 1) >= SB_JOURNAL_LIST(p_s_sb)[jindex].j_start) {
 3229         flush_journal_list(p_s_sb, SB_JOURNAL_LIST(p_s_sb) + jindex, 1) ; 
 3230       }
 3231     } else if (SB_JOURNAL_LIST(p_s_sb)[jindex].j_len > 0 && 
 3232               (SB_JOURNAL(p_s_sb)->j_start + SB_JOURNAL_TRANS_MAX(p_s_sb) + 1) > SB_ONDISK_JOURNAL_SIZE(p_s_sb)) {
 3233       if (((SB_JOURNAL(p_s_sb)->j_start + SB_JOURNAL_TRANS_MAX(p_s_sb) + 1) % SB_ONDISK_JOURNAL_SIZE(p_s_sb)) >= 
 3234             SB_JOURNAL_LIST(p_s_sb)[jindex].j_start) {
 3235         flush_journal_list(p_s_sb, SB_JOURNAL_LIST(p_s_sb) + jindex, 1 ) ; 
 3236       }
 3237     } 
 3238     /* this check should always be run, to send old lists to disk */
 3239     if (SB_JOURNAL_LIST(p_s_sb)[jindex].j_len > 0 && 
 3240               SB_JOURNAL_LIST(p_s_sb)[jindex].j_timestamp < 
 3241               (CURRENT_TIME - (SB_JOURNAL_MAX_TRANS_AGE(p_s_sb) * 4))) {
 3242         flush_journal_list(p_s_sb, SB_JOURNAL_LIST(p_s_sb) + jindex, 1 ) ; 
 3243     }
 3244   }
 3245 
 3246   /* if the next journal_list is still in use, flush it */
 3247   if (SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_len != 0) {
 3248     flush_journal_list(p_s_sb, SB_JOURNAL_LIST(p_s_sb) + SB_JOURNAL_LIST_INDEX(p_s_sb), 1) ; 
 3249   }
 3250 
 3251   /* we don't want anyone flushing the new transaction's list */
 3252   atomic_set(&(SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_commit_flushing), 1) ;
 3253   atomic_set(&(SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_flushing), 1) ;
 3254   SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_list_bitmap = get_list_bitmap(p_s_sb, SB_JOURNAL_LIST(p_s_sb) + 
 3255                                                                                          SB_JOURNAL_LIST_INDEX(p_s_sb)) ;
 3256 
 3257   if (!(SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_list_bitmap)) {
 3258     reiserfs_panic(p_s_sb, "journal-1996: do_journal_end, could not get a list bitmap\n") ;
 3259   }
 3260   unlock_journal(p_s_sb) ;
 3261   atomic_set(&(SB_JOURNAL(p_s_sb)->j_jlock), 0) ;
 3262   /* wake up anybody waiting to join. */
 3263   wake_up(&(SB_JOURNAL(p_s_sb)->j_join_wait)) ;
 3264   return 0 ;
 3265 }
 3266 
 3267 
 3268 
