FreeBSD/Linux Kernel Cross Reference
sys/fs/jfs/jfs_txnmgr.c

    1 /*
    2  *   Copyright (c) International Business Machines Corp., 2000-2002
    3  *   Portions Copyright (c) Christoph Hellwig, 2001-2002
    4  *
    5  *   This program is free software;  you can redistribute it and/or modify
    6  *   it under the terms of the GNU General Public License as published by
    7  *   the Free Software Foundation; either version 2 of the License, or 
    8  *   (at your option) any later version.
    9  * 
   10  *   This program is distributed in the hope that it will be useful,
   11  *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
   12  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
   13  *   the GNU General Public License for more details.
   14  *
   15  *   You should have received a copy of the GNU General Public License
   16  *   along with this program;  if not, write to the Free Software 
   17  *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
   18  */
   19 
   20 /*
   21  *      jfs_txnmgr.c: transaction manager
   22  *
   23  * notes:
   24  * transaction starts with txBegin() and ends with txCommit()
   25  * or txAbort().
   26  *
   27  * tlock is acquired at the time of update;
   28  * (obviates a scan at commit time for xtree and dtree)
   29  * tlock and mp point to each other;
   30  * (no hashlist for mp -> tlock).
   31  *
   32  * special cases:
   33  * tlock on in-memory inode:
   34  * in-place tlock in the in-memory inode itself;
   35  * converted to page lock by iWrite() at commit time.
   36  *
   37  * tlock during write()/mmap() under anonymous transaction (tid = 0):
   38  * transferred (?) to transaction at commit time.
   39  *
   40  * use the page itself to update allocation maps
   41  * (obviates intermediate replication of allocation/deallocation data)
   42  * hold on to mp+lock through update of maps
   43  */
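
/*
 * Illustrative sketch (not part of the original source): how a caller
 * typically drives this interface.  ip and mp are assumed to be set up
 * already; only txBegin/txLock/txCommit/txEnd are defined in this file.
 * txLock() returns NULL if the page was locked by another transaction
 * and the caller slept; real callers retry in that case.
 */
#if 0
	tid_t tid;
	struct tlock *tlck;
	int rc;

	tid = txBegin(ip->i_sb, 0);	/* allocate a tid/tblock, may sleep */

	tlck = txLock(tid, ip, mp, tlckXTREE | tlckNEW);
	/* ... modify the page bound to mp, recording lines in tlck ... */

	rc = txCommit(tid, 1, &ip, 0);	/* write log records + COMMIT record */
	txEnd(tid);			/* return the tblock to the freelist */
#endif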
   44 
   45 
   46 #include <linux/fs.h>
   47 #include <linux/vmalloc.h>
   48 #include <linux/smp_lock.h>
   49 #include <linux/completion.h>
   50 #include "jfs_incore.h"
   51 #include "jfs_filsys.h"
   52 #include "jfs_metapage.h"
   53 #include "jfs_dinode.h"
   54 #include "jfs_imap.h"
   55 #include "jfs_dmap.h"
   56 #include "jfs_superblock.h"
   57 #include "jfs_debug.h"
   58 
   59 /*
   60  *      transaction management structures
   61  */
   62 static struct {
   63         /* tblock */
   64         int freetid;            /* index of a free tid structure */
   65         wait_queue_head_t freewait;     /* eventlist of free tblock */
   66 
   67         /* tlock */
   68         int freelock;           /* index of first free lock word */
   69         wait_queue_head_t freelockwait; /* eventlist of free tlock */
   70         wait_queue_head_t lowlockwait;  /* eventlist of ample tlocks */
   71         int tlocksInUse;        /* Number of tlocks in use */
   72         int TlocksLow;          /* Indicates low number of available tlocks */
   73         spinlock_t LazyLock;    /* synchronize sync_queue & unlock_queue */
   74 /*      struct tblock *sync_queue; * Transactions waiting for data sync */
   75         struct tblock *unlock_queue;    /* Txns waiting to be released */
   76         struct tblock *unlock_tail;     /* Tail of unlock_queue */
   77         struct list_head anon_list;     /* inodes having anonymous txns */
   78         struct list_head anon_list2;    /* inodes having anonymous txns
   79                                            that couldn't be sync'ed */
   80 } TxAnchor;
   81 
   82 #ifdef CONFIG_JFS_STATISTICS
   83 struct {
   84         uint txBegin;
   85         uint txBegin_barrier;
   86         uint txBegin_lockslow;
   87         uint txBegin_freetid;
   88         uint txBeginAnon;
   89         uint txBeginAnon_barrier;
   90         uint txBeginAnon_lockslow;
   91         uint txLockAlloc;
   92         uint txLockAlloc_freelock;
   93 } TxStat;
   94 #endif
   95 
   96 static int nTxBlock = 512;      /* number of transaction blocks */
   97 struct tblock *TxBlock;         /* transaction block table */
   98 
   99 static int nTxLock = 4096;      /* number of transaction locks */
  100 static int TxLockLWM = 4096*.4; /* Low water mark for number of txLocks used */
  101 static int TxLockHWM = 4096*.8; /* High water mark for number of txLocks used */
  102 struct tlock *TxLock;           /* transaction lock table */
  103 
  104 
  105 /*
  106  *      transaction management lock
  107  */
  108 static spinlock_t jfsTxnLock = SPIN_LOCK_UNLOCKED;
  109 
  110 #define TXN_LOCK()              spin_lock(&jfsTxnLock)
  111 #define TXN_UNLOCK()            spin_unlock(&jfsTxnLock)
  112 
  113 #define LAZY_LOCK_INIT()        spin_lock_init(&TxAnchor.LazyLock);
  114 #define LAZY_LOCK(flags)        spin_lock_irqsave(&TxAnchor.LazyLock, flags)
  115 #define LAZY_UNLOCK(flags) spin_unlock_irqrestore(&TxAnchor.LazyLock, flags)
  116 
  117 DECLARE_WAIT_QUEUE_HEAD(jfs_sync_thread_wait);
  118 DECLARE_WAIT_QUEUE_HEAD(jfs_commit_thread_wait);
  119 
  120 /*
  121  * Retry logic exists outside these macros to protect against spurious wakeups.
  122  */
  123 static inline void TXN_SLEEP_DROP_LOCK(wait_queue_head_t * event)
  124 {
  125         DECLARE_WAITQUEUE(wait, current);
  126 
  127         add_wait_queue(event, &wait);
  128         set_current_state(TASK_UNINTERRUPTIBLE);
  129         TXN_UNLOCK();
  130         schedule();
  131         current->state = TASK_RUNNING;
  132         remove_wait_queue(event, &wait);
  133 }
  134 
  135 #define TXN_SLEEP(event)\
  136 {\
  137         TXN_SLEEP_DROP_LOCK(event);\
  138         TXN_LOCK();\
  139 }
  140 
  141 #define TXN_WAKEUP(event) wake_up_all(event)
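
/*
 * Sketch of the caller-side retry idiom the comment above refers to:
 * TXN_SLEEP() may wake spuriously, so callers re-test the condition in
 * a loop (this is the actual pattern used in txLockAlloc() below).
 */
#if 0
	while (!(lid = TxAnchor.freelock))
		TXN_SLEEP(&TxAnchor.freelockwait);
#endif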
  142 
  143 
  144 /*
  145  *      statistics
  146  */
  147 struct {
  148         tid_t maxtid;           /* 4: biggest tid ever used */
  149         lid_t maxlid;           /* 4: biggest lid ever used */
  150         int ntid;               /* 4: # of transactions performed */
  151         int nlid;               /* 4: # of tlocks acquired */
  152         int waitlock;           /* 4: # of tlock wait */
  153 } stattx;
  154 
  155 
  156 /*
  157  * external references
  158  */
  159 extern int lmGroupCommit(struct jfs_log *, struct tblock *);
  160 extern void lmSync(struct jfs_log *);
  161 extern int jfs_commit_inode(struct inode *, int);
  162 extern int jfs_stop_threads;
  163 
  164 struct task_struct *jfsCommitTask;
  165 extern struct completion jfsIOwait;
  166 
  167 /*
  168  * forward references
  169  */
  170 int diLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
  171           struct tlock * tlck, struct commit * cd);
  172 int dataLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
  173             struct tlock * tlck);
  174 void dtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
  175            struct tlock * tlck);
  176 void inlineLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
  177                struct tlock * tlck);
  178 void mapLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
  179             struct tlock * tlck);
  180 void txAbortCommit(struct commit * cd, int exval);
  181 static void txAllocPMap(struct inode *ip, struct maplock * maplock,
  182                         struct tblock * tblk);
  183 void txForce(struct tblock * tblk);
  184 static int txLog(struct jfs_log * log, struct tblock * tblk, struct commit * cd);
  185 int txMoreLock(void);
  186 static void txUpdateMap(struct tblock * tblk);
  187 static void txRelease(struct tblock * tblk);
  188 void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
  189            struct tlock * tlck);
  190 static void LogSyncRelease(struct metapage * mp);
  191 
  192 /*
  193  *              transaction block/lock management
  194  *              ---------------------------------
  195  */
  196 
  197 /*
  198  * Get a transaction lock from the free list.  If the number in use is
  199  * greater than the high water mark, wake up the sync daemon.  This should
  200  * free some anonymous transaction locks.  (TXN_LOCK must be held.)
  201  */
  202 static lid_t txLockAlloc(void)
  203 {
  204         lid_t lid;
  205 
  206         INCREMENT(TxStat.txLockAlloc);
  207         if (!TxAnchor.freelock) {
  208                 INCREMENT(TxStat.txLockAlloc_freelock);
  209         }
  210 
  211         while (!(lid = TxAnchor.freelock))
  212                 TXN_SLEEP(&TxAnchor.freelockwait);
  213         TxAnchor.freelock = TxLock[lid].next;
  214         HIGHWATERMARK(stattx.maxlid, lid);
  215         if ((++TxAnchor.tlocksInUse > TxLockHWM) && (TxAnchor.TlocksLow == 0)) {
  216                 jfs_info("txLockAlloc TlocksLow");
  217                 TxAnchor.TlocksLow = 1;
  218                 wake_up(&jfs_sync_thread_wait);
  219         }
  220 
  221         return lid;
  222 }
  223 
  224 static void txLockFree(lid_t lid)
  225 {
  226         TxLock[lid].next = TxAnchor.freelock;
  227         TxAnchor.freelock = lid;
  228         TxAnchor.tlocksInUse--;
  229         if (TxAnchor.TlocksLow && (TxAnchor.tlocksInUse < TxLockLWM)) {
  230                 jfs_info("txLockFree TlocksLow no more");
  231                 TxAnchor.TlocksLow = 0;
  232                 TXN_WAKEUP(&TxAnchor.lowlockwait);
  233         }
  234         TXN_WAKEUP(&TxAnchor.freelockwait);
  235 }
  236 
  237 /*
  238  * NAME:        txInit()
  239  *
  240  * FUNCTION:    initialize transaction management structures
  241  *
  242  * RETURN:
  243  *
  244  * serialization: single thread at jfs_init()
  245  */
  246 int txInit(void)
  247 {
  248         int k, size;
  249 
  250         /*
  251          * initialize transaction block (tblock) table
  252          *
  253          * transaction id (tid) = tblock index
  254          * tid = 0 is reserved.
  255          */
  256         size = sizeof(struct tblock) * nTxBlock;
  257         TxBlock = (struct tblock *) vmalloc(size);
  258         if (TxBlock == NULL)
  259                 return ENOMEM;
  260 
  261         for (k = 1; k < nTxBlock - 1; k++) {
  262                 TxBlock[k].next = k + 1;
  263                 init_waitqueue_head(&TxBlock[k].gcwait);
  264                 init_waitqueue_head(&TxBlock[k].waitor);
  265         }
  266         TxBlock[k].next = 0;
  267         init_waitqueue_head(&TxBlock[k].gcwait);
  268         init_waitqueue_head(&TxBlock[k].waitor);
  269 
  270         TxAnchor.freetid = 1;
  271         init_waitqueue_head(&TxAnchor.freewait);
  272 
  273         stattx.maxtid = 1;      /* statistics */
  274 
  275         /*
  276          * initialize transaction lock (tlock) table
  277          *
  278          * transaction lock id = tlock index
  279          * tlock id = 0 is reserved.
  280          */
  281         size = sizeof(struct tlock) * nTxLock;
  282         TxLock = (struct tlock *) vmalloc(size);
  283         if (TxLock == NULL) {
  284                 vfree(TxBlock);
  285                 return ENOMEM;
  286         }
  287 
  288         /* initialize tlock table */
  289         for (k = 1; k < nTxLock - 1; k++)
  290                 TxLock[k].next = k + 1;
  291         TxLock[k].next = 0;
  292         init_waitqueue_head(&TxAnchor.freelockwait);
  293         init_waitqueue_head(&TxAnchor.lowlockwait);
  294 
  295         TxAnchor.freelock = 1;
  296         TxAnchor.tlocksInUse = 0;
  297         INIT_LIST_HEAD(&TxAnchor.anon_list);
  298         INIT_LIST_HEAD(&TxAnchor.anon_list2);
  299 
  300         stattx.maxlid = 1;      /* statistics */
  301 
  302         return 0;
  303 }
  304 
  305 /*
  306  * NAME:        txExit()
  307  *
  308  * FUNCTION:    clean up when module is unloaded
  309  */
  310 void txExit(void)
  311 {
  312         vfree(TxLock);
  313         TxLock = 0;
  314         vfree(TxBlock);
  315         TxBlock = 0;
  316 }
  317 
  318 
  319 /*
  320  * NAME:        txBegin()
  321  *
  322  * FUNCTION:    start a transaction.
  323  *
  324  * PARAMETER:   sb      - superblock
  325  *              flag    - force for nested tx;
  326  *
  327  * RETURN:      tid     - transaction id
  328  *
  329  * note: the force flag allows a tx to be started for a nested tx,
  330  * to prevent deadlock on the logsync barrier;
  331  */
  332 tid_t txBegin(struct super_block *sb, int flag)
  333 {
  334         tid_t t;
  335         struct tblock *tblk;
  336         struct jfs_log *log;
  337 
  338         jfs_info("txBegin: flag = 0x%x", flag);
  339         log = JFS_SBI(sb)->log;
  340 
  341         TXN_LOCK();
  342 
  343         INCREMENT(TxStat.txBegin);
  344 
  345       retry:
  346         if (!(flag & COMMIT_FORCE)) {
  347                 /*
  348                  * synchronize with logsync barrier
  349                  */
  350                 if (test_bit(log_SYNCBARRIER, &log->flag) ||
  351                     test_bit(log_QUIESCE, &log->flag)) {
  352                         INCREMENT(TxStat.txBegin_barrier);
  353                         TXN_SLEEP(&log->syncwait);
  354                         goto retry;
  355                 }
  356         }
  357         if (flag == 0) {
  358                 /*
  359                  * Don't begin transaction if we're getting starved for tlocks
  360                  * unless COMMIT_FORCE or COMMIT_INODE (which may ultimately
  361                  * free tlocks)
  362                  */
  363                 if (TxAnchor.TlocksLow) {
  364                         INCREMENT(TxStat.txBegin_lockslow);
  365                         TXN_SLEEP(&TxAnchor.lowlockwait);
  366                         goto retry;
  367                 }
  368         }
  369 
  370         /*
  371          * allocate transaction id/block
  372          */
  373         if ((t = TxAnchor.freetid) == 0) {
  374                 jfs_info("txBegin: waiting for free tid");
  375                 INCREMENT(TxStat.txBegin_freetid);
  376                 TXN_SLEEP(&TxAnchor.freewait);
  377                 goto retry;
  378         }
  379 
  380         tblk = tid_to_tblock(t);
  381 
  382         if ((tblk->next == 0) && !(flag & COMMIT_FORCE)) {
  383                 /* Don't let a non-forced transaction take the last tblk */
  384                 jfs_info("txBegin: waiting for free tid");
  385                 INCREMENT(TxStat.txBegin_freetid);
  386                 TXN_SLEEP(&TxAnchor.freewait);
  387                 goto retry;
  388         }
  389 
  390         TxAnchor.freetid = tblk->next;
  391 
  392         /*
  393          * initialize transaction
  394          */
  395 
  396         /*
  397          * We can't zero the whole thing or we screw up another thread being
  398          * awakened after sleeping on tblk->waitor
  399          *
  400          * memset(tblk, 0, sizeof(struct tblock));
  401          */
  402         tblk->next = tblk->last = tblk->xflag = tblk->flag = tblk->lsn = 0;
  403 
  404         tblk->sb = sb;
  405         ++log->logtid;
  406         tblk->logtid = log->logtid;
  407 
  408         ++log->active;
  409 
  410         HIGHWATERMARK(stattx.maxtid, t);        /* statistics */
  411         INCREMENT(stattx.ntid); /* statistics */
  412 
  413         TXN_UNLOCK();
  414 
  415         jfs_info("txBegin: returning tid = %d", t);
  416 
  417         return t;
  418 }
  419 
  420 
  421 /*
  422  * NAME:        txBeginAnon()
  423  *
  424  * FUNCTION:    start an anonymous transaction.
  425  *              Blocks during a logsync barrier or when available tlocks
  426  *              are low, to prevent anonymous tlocks from depleting the supply.
  427  *
  428  * PARAMETER:   sb      - superblock
  429  *
  430  * RETURN:      none
  431  */
  432 void txBeginAnon(struct super_block *sb)
  433 {
  434         struct jfs_log *log;
  435 
  436         log = JFS_SBI(sb)->log;
  437 
  438         TXN_LOCK();
  439         INCREMENT(TxStat.txBeginAnon);
  440 
  441       retry:
  442         /*
  443          * synchronize with logsync barrier
  444          */
  445         if (test_bit(log_SYNCBARRIER, &log->flag) ||
  446             test_bit(log_QUIESCE, &log->flag)) {
  447                 INCREMENT(TxStat.txBeginAnon_barrier);
  448                 TXN_SLEEP(&log->syncwait);
  449                 goto retry;
  450         }
  451 
  452         /*
  453          * Don't begin transaction if we're getting starved for tlocks
  454          */
  455         if (TxAnchor.TlocksLow) {
  456                 INCREMENT(TxStat.txBeginAnon_lockslow);
  457                 TXN_SLEEP(&TxAnchor.lowlockwait);
  458                 goto retry;
  459         }
  460         TXN_UNLOCK();
  461 }
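
/*
 * Illustrative sketch (not part of the original source): an update made
 * outside any transaction, e.g. from write()/mmap(), first throttles
 * itself with txBeginAnon() and then takes its tlocks under tid = 0.
 * Such tlocks sit on the inode's anonymous tlock list until a later
 * txCommit() adopts them.  ip and mp are assumed to be set up already.
 */
#if 0
	txBeginAnon(ip->i_sb);			/* may block on barrier/tlocks */
	tlck = txLock(0, ip, mp, tlckXTREE);	/* tid = 0: anonymous tlock */
#endif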
  462 
  463 
  464 /*
  465  *      txEnd()
  466  *
  467  * function: free specified transaction block.
  468  *
  469  *      logsync barrier processing:
  470  *
  471  * serialization:
  472  */
  473 void txEnd(tid_t tid)
  474 {
  475         struct tblock *tblk = tid_to_tblock(tid);
  476         struct jfs_log *log;
  477 
  478         jfs_info("txEnd: tid = %d", tid);
  479         TXN_LOCK();
  480 
  481         /*
  482          * wakeup transactions waiting on the page locked
  483          * by the current transaction
  484          */
  485         TXN_WAKEUP(&tblk->waitor);
  486 
  487         log = JFS_SBI(tblk->sb)->log;
  488 
  489         /*
  490          * Lazy commit thread can't free this guy until we mark it UNLOCKED,
  491          * otherwise, we would be left with a transaction that may have been
  492          * reused.
  493          *
  494          * Lazy commit thread will turn off tblkGC_LAZY before calling this
  495          * routine.
  496          */
  497         if (tblk->flag & tblkGC_LAZY) {
  498                 jfs_info("txEnd called w/lazy tid: %d, tblk = 0x%p", tid, tblk);
  499                 TXN_UNLOCK();
  500 
  501                 spin_lock_irq(&log->gclock);    // LOGGC_LOCK
  502                 tblk->flag |= tblkGC_UNLOCKED;
  503                 spin_unlock_irq(&log->gclock);  // LOGGC_UNLOCK
  504                 return;
  505         }
  506 
  507         jfs_info("txEnd: tid: %d, tblk = 0x%p", tid, tblk);
  508 
  509         assert(tblk->next == 0);
  510 
  511         /*
  512          * insert tblock back on freelist
  513          */
  514         tblk->next = TxAnchor.freetid;
  515         TxAnchor.freetid = tid;
  516 
  517         /*
  518          * mark the tblock not active
  519          */
  520         if (--log->active == 0) {
  521                 clear_bit(log_FLUSH, &log->flag);
  522 
  523                 /*
  524                  * synchronize with logsync barrier
  525                  */
  526                 if (test_bit(log_SYNCBARRIER, &log->flag)) {
  527                         /* forward log syncpt */
  528                         /* lmSync(log); */
  529 
  530                         jfs_info("log barrier off: 0x%x", log->lsn);
  531 
  532                         /* enable new transactions start */
  533                         clear_bit(log_SYNCBARRIER, &log->flag);
  534 
  535                         /* wakeup all waiters for logsync barrier */
  536                         TXN_WAKEUP(&log->syncwait);
  537                 }
  538         }
  539 
  540         /*
  541          * wakeup all waiters for a free tblock
  542          */
  543         TXN_WAKEUP(&TxAnchor.freewait);
  544 
  545         TXN_UNLOCK();
  546 }
  547 
  548 
  549 /*
  550  *      txLock()
  551  *
  552  * function: acquire a transaction lock on the specified <mp>
  553  *
  554  * parameter:
  555  *
  556  * return:      transaction lock id
  557  *
  558  * serialization:
  559  */
  560 struct tlock *txLock(tid_t tid, struct inode *ip, struct metapage * mp,
  561                      int type)
  562 {
  563         struct jfs_inode_info *jfs_ip = JFS_IP(ip);
  564         int dir_xtree = 0;
  565         lid_t lid;
  566         tid_t xtid;
  567         struct tlock *tlck;
  568         struct xtlock *xtlck;
  569         struct linelock *linelock;
  570         xtpage_t *p;
  571         struct tblock *tblk;
  572 
  573         assert(!test_cflag(COMMIT_Nolink, ip));
  574 
  575         TXN_LOCK();
  576 
  577         if (S_ISDIR(ip->i_mode) && (type & tlckXTREE) &&
  578             !(mp->xflag & COMMIT_PAGE)) {
  579                 /*
  580                  * Directory inode is special.  It can have both an xtree tlock
  581                  * and a dtree tlock associated with it.
  582                  */
  583                 dir_xtree = 1;
  584                 lid = jfs_ip->xtlid;
  585         } else
  586                 lid = mp->lid;
  587 
  588         /* is page not locked by a transaction ? */
  589         if (lid == 0)
  590                 goto allocateLock;
  591 
  592         jfs_info("txLock: tid:%d ip:0x%p mp:0x%p lid:%d", tid, ip, mp, lid);
  593 
  594         /* is page locked by the requester transaction ? */
  595         tlck = lid_to_tlock(lid);
  596         if ((xtid = tlck->tid) == tid)
  597                 goto grantLock;
  598 
  599         /*
  600          * is page locked by anonymous transaction/lock ?
  601          *
  602          * (page update without transaction (i.e., file write) is
  603          * locked under anonymous transaction tid = 0:
  604          * anonymous tlocks maintained on anonymous tlock list of
  605          * the inode of the page and available to all anonymous
  606          * transactions until txCommit() time at which point
  607          * they are transferred to the transaction tlock list of
  608  * the committing transaction of the inode)
  609          */
  610         if (xtid == 0) {
  611                 tlck->tid = tid;
  612                 tblk = tid_to_tblock(tid);
  613                 /*
  614                  * The order of the tlocks in the transaction is important
  615                  * (during truncate, child xtree pages must be freed before
  616                  * parent's tlocks change the working map).
  617                  * Take tlock off anonymous list and add to tail of
  618                  * transaction list
  619                  *
  620                  * Note:  We really need to get rid of the tid & lid and
  621                  * use list_head's.  This code is getting UGLY!
  622                  */
  623                 if (jfs_ip->atlhead == lid) {
  624                         if (jfs_ip->atltail == lid) {
  625                                 /* only anonymous txn.
  626                                  * Remove from anon_list
  627                                  */
  628                                 list_del_init(&jfs_ip->anon_inode_list);
  629                         }
  630                         jfs_ip->atlhead = tlck->next;
  631                 } else {
  632                         lid_t last;
  633                         for (last = jfs_ip->atlhead;
  634                              lid_to_tlock(last)->next != lid;
  635                              last = lid_to_tlock(last)->next) {
  636                                 assert(last);
  637                         }
  638                         lid_to_tlock(last)->next = tlck->next;
  639                         if (jfs_ip->atltail == lid)
  640                                 jfs_ip->atltail = last;
  641                 }
  642 
  643                 /* insert the tlock at tail of transaction tlock list */
  644 
  645                 if (tblk->next)
  646                         lid_to_tlock(tblk->last)->next = lid;
  647                 else
  648                         tblk->next = lid;
  649                 tlck->next = 0;
  650                 tblk->last = lid;
  651 
  652                 goto grantLock;
  653         }
  654 
  655         goto waitLock;
  656 
  657         /*
  658          * allocate a tlock
  659          */
  660       allocateLock:
  661         lid = txLockAlloc();
  662         tlck = lid_to_tlock(lid);
  663 
  664         /*
  665          * initialize tlock
  666          */
  667         tlck->tid = tid;
  668 
  669         /* mark tlock for meta-data page */
  670         if (mp->xflag & COMMIT_PAGE) {
  671 
  672                 tlck->flag = tlckPAGELOCK;
  673 
  674                 /* mark the page dirty and nohomeok */
  675                 mark_metapage_dirty(mp);
  676                 atomic_inc(&mp->nohomeok);
  677 
  678                 jfs_info("locking mp = 0x%p, nohomeok = %d tid = %d tlck = 0x%p",
  679                          mp, atomic_read(&mp->nohomeok), tid, tlck);
  680 
  681                 /* if anonymous transaction, and buffer is on the group
  682                  * commit synclist, mark inode to show this.  This will
  683                  * prevent the buffer from being marked nohomeok for too
  684                  * long a time.
  685                  */
  686                 if ((tid == 0) && mp->lsn)
  687                         set_cflag(COMMIT_Synclist, ip);
  688         }
  689         /* mark tlock for in-memory inode */
  690         else
  691                 tlck->flag = tlckINODELOCK;
  692 
  693         tlck->type = 0;
  694 
  695         /* bind the tlock and the page */
  696         tlck->ip = ip;
  697         tlck->mp = mp;
  698         if (dir_xtree)
  699                 jfs_ip->xtlid = lid;
  700         else
  701                 mp->lid = lid;
  702 
  703         /*
  704          * enqueue transaction lock to transaction/inode
  705          */
  706         /* insert the tlock at tail of transaction tlock list */
  707         if (tid) {
  708                 tblk = tid_to_tblock(tid);
  709                 if (tblk->next)
  710                         lid_to_tlock(tblk->last)->next = lid;
  711                 else
  712                         tblk->next = lid;
  713                 tlck->next = 0;
  714                 tblk->last = lid;
  715         }
  716         /* anonymous transaction:
  717          * insert the tlock at head of inode anonymous tlock list
  718          */
  719         else {
  720                 tlck->next = jfs_ip->atlhead;
  721                 jfs_ip->atlhead = lid;
  722                 if (tlck->next == 0) {
  723                         /* This inode's first anonymous transaction */
  724                         jfs_ip->atltail = lid;
  725                         list_add_tail(&jfs_ip->anon_inode_list,
  726                                       &TxAnchor.anon_list);
  727                 }
  728         }
  729 
  730         /* initialize type dependent area for linelock */
  731         linelock = (struct linelock *) & tlck->lock;
  732         linelock->next = 0;
  733         linelock->flag = tlckLINELOCK;
  734         linelock->maxcnt = TLOCKSHORT;
  735         linelock->index = 0;
  736 
  737         switch (type & tlckTYPE) {
  738         case tlckDTREE:
  739                 linelock->l2linesize = L2DTSLOTSIZE;
  740                 break;
  741 
  742         case tlckXTREE:
  743                 linelock->l2linesize = L2XTSLOTSIZE;
  744 
  745                 xtlck = (struct xtlock *) linelock;
  746                 xtlck->header.offset = 0;
  747                 xtlck->header.length = 2;
  748 
  749                 if (type & tlckNEW) {
  750                         xtlck->lwm.offset = XTENTRYSTART;
  751                 } else {
  752                         if (mp->xflag & COMMIT_PAGE)
  753                                 p = (xtpage_t *) mp->data;
  754                         else
  755                                 p = &jfs_ip->i_xtroot;
  756                         xtlck->lwm.offset =
  757                             le16_to_cpu(p->header.nextindex);
  758                 }
  759                 xtlck->lwm.length = 0;  /* ! */
  760                 xtlck->twm.offset = 0;
  761                 xtlck->hwm.offset = 0;
  762 
  763                 xtlck->index = 2;
  764                 break;
  765 
  766         case tlckINODE:
  767                 linelock->l2linesize = L2INODESLOTSIZE;
  768                 break;
  769 
  770         case tlckDATA:
  771                 linelock->l2linesize = L2DATASLOTSIZE;
  772                 break;
  773 
  774         default:
  775                 jfs_err("UFO tlock:0x%p", tlck);
  776         }
  777 
  778         /*
  779          * update tlock vector
  780          */
  781       grantLock:
  782         tlck->type |= type;
  783 
  784         TXN_UNLOCK();
  785 
  786         return tlck;
  787 
  788         /*
  789          * page is being locked by another transaction:
  790          */
  791       waitLock:
  792         /* Only locks on ipimap or ipaimap should reach here */
  793         /* assert(jfs_ip->fileset == AGGREGATE_I); */
  794         if (jfs_ip->fileset != AGGREGATE_I) {
  795                 jfs_err("txLock: trying to lock locked page!");
  796                 dump_mem("ip", ip, sizeof(struct inode));
  797                 dump_mem("mp", mp, sizeof(struct metapage));
  798                 dump_mem("Locker's tblk", tid_to_tblock(tid),
  799                          sizeof(struct tblock));
  800                 dump_mem("Tlock", tlck, sizeof(struct tlock));
  801                 BUG();
  802         }
  803         INCREMENT(stattx.waitlock);     /* statistics */
  804         release_metapage(mp);
  805 
  806         jfs_info("txLock: in waitLock, tid = %d, xtid = %d, lid = %d",
  807                  tid, xtid, lid);
  808         TXN_SLEEP_DROP_LOCK(&tid_to_tblock(xtid)->waitor);
  809         jfs_info("txLock: awakened     tid = %d, lid = %d", tid, lid);
  810 
  811         return NULL;
  812 }
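
/*
 * Illustrative sketch (not part of the original source): a typical caller
 * records the lines of the page it dirtied in the tlock's embedded lock
 * area; slot_offset/slot_count are hypothetical values supplied by the
 * caller.
 */
#if 0
	struct linelock *linelock;
	struct lv *lv;

	tlck = txLock(tid, ip, mp, tlckDTREE | tlckENTRY);
	linelock = (struct linelock *) &tlck->lock;
	lv = &linelock->lv[linelock->index];	/* next free line vector */
	lv->offset = slot_offset;		/* first line modified */
	lv->length = slot_count;		/* number of lines modified */
	linelock->index++;
#endif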
  813 
  814 
  815 /*
  816  * NAME:        txRelease()
  817  *
  818  * FUNCTION:    Release buffers associated with transaction locks, but don't
  819  *              mark homeok yet.  This allows other transactions to modify
  820  *              buffers, but won't let them go to disk until the commit
  821  *              record actually gets written.
  822  *
  823  * PARAMETER:
  824  *              tblk    -
  825  *
  826  * RETURN:      Errors from subroutines.
  827  */
  828 static void txRelease(struct tblock * tblk)
  829 {
  830         struct metapage *mp;
  831         lid_t lid;
  832         struct tlock *tlck;
  833 
  834         TXN_LOCK();
  835 
  836         for (lid = tblk->next; lid; lid = tlck->next) {
  837                 tlck = lid_to_tlock(lid);
  838                 if ((mp = tlck->mp) != NULL &&
  839                     (tlck->type & tlckBTROOT) == 0) {
  840                         assert(mp->xflag & COMMIT_PAGE);
  841                         mp->lid = 0;
  842                 }
  843         }
  844 
  845         /*
  846          * wakeup transactions waiting on a page locked
  847          * by the current transaction
  848          */
  849         TXN_WAKEUP(&tblk->waitor);
  850 
  851         TXN_UNLOCK();
  852 }
  853 
  854 
  855 /*
  856  * NAME:        txUnlock()
  857  *
  858  * FUNCTION:    Initiates pageout of pages modified by tid in journalled
  859  *              objects and frees their lockwords.
  860  */
  861 static void txUnlock(struct tblock * tblk)
  862 {
  863         struct tlock *tlck;
  864         struct linelock *linelock;
  865         lid_t lid, next, llid, k;
  866         struct metapage *mp;
  867         struct jfs_log *log;
  868         int difft, diffp;
  869 
  870         jfs_info("txUnlock: tblk = 0x%p", tblk);
  871         log = JFS_SBI(tblk->sb)->log;
  872 
  873         /*
  874          * mark page under tlock homeok (its log has been written):
  875          */
  876         for (lid = tblk->next; lid; lid = next) {
  877                 tlck = lid_to_tlock(lid);
  878                 next = tlck->next;
  879 
  880                 jfs_info("unlocking lid = %d, tlck = 0x%p", lid, tlck);
  881 
  882                 /* unbind page from tlock */
  883                 if ((mp = tlck->mp) != NULL &&
  884                     (tlck->type & tlckBTROOT) == 0) {
  885                         assert(mp->xflag & COMMIT_PAGE);
  886 
  887                         /* hold buffer
  888                          *
  889                          * It's possible that someone else has the metapage.
  890          * The only things we're changing are nohomeok, which
  891                          * is handled atomically, and clsn which is protected
  892                          * by the LOGSYNC_LOCK.
  893                          */
  894                         hold_metapage(mp, 1);
  895 
  896                         assert(atomic_read(&mp->nohomeok) > 0);
  897                         atomic_dec(&mp->nohomeok);
  898 
  899                         /* inherit younger/larger clsn */
  900                         LOGSYNC_LOCK(log);
  901                         if (mp->clsn) {
  902                                 logdiff(difft, tblk->clsn, log);
  903                                 logdiff(diffp, mp->clsn, log);
  904                                 if (difft > diffp)
  905                                         mp->clsn = tblk->clsn;
  906                         } else
  907                                 mp->clsn = tblk->clsn;
  908                         LOGSYNC_UNLOCK(log);
  909 
  910                         assert(!(tlck->flag & tlckFREEPAGE));
  911 
  912                         if (tlck->flag & tlckWRITEPAGE) {
  913                                 write_metapage(mp);
  914                         } else {
  915                                 /* release page which has been forced */
  916                                 release_metapage(mp);
  917                         }
  918                 }
  919 
  920                 /* insert tlock, and linelock(s) of the tlock if any,
  921                  * at head of freelist
  922                  */
  923                 TXN_LOCK();
  924 
  925                 llid = ((struct linelock *) & tlck->lock)->next;
  926                 while (llid) {
  927                         linelock = (struct linelock *) lid_to_tlock(llid);
  928                         k = linelock->next;
  929                         txLockFree(llid);
  930                         llid = k;
  931                 }
  932                 txLockFree(lid);
  933 
  934                 TXN_UNLOCK();
  935         }
  936         tblk->next = tblk->last = 0;
  937 
  938         /*
  939          * remove tblock from logsynclist
  940  * (allocation map pages inherited the lsn of tblk and
  941  * have been inserted in the logsync list at txUpdateMap())
  942          */
  943         if (tblk->lsn) {
  944                 LOGSYNC_LOCK(log);
  945                 log->count--;
  946                 list_del(&tblk->synclist);
  947                 LOGSYNC_UNLOCK(log);
  948         }
  949 }
  950 
  951 
  952 /*
  953  *      txMaplock()
  954  *
  955  * function: allocate a transaction lock for freed page/entry;
  956  *      for freed page, maplock is used as xtlock/dtlock type;
  957  */
  958 struct tlock *txMaplock(tid_t tid, struct inode *ip, int type)
  959 {
  960         struct jfs_inode_info *jfs_ip = JFS_IP(ip);
  961         lid_t lid;
  962         struct tblock *tblk;
  963         struct tlock *tlck;
  964         struct maplock *maplock;
  965 
  966         TXN_LOCK();
  967 
  968         /*
  969          * allocate a tlock
  970          */
  971         lid = txLockAlloc();
  972         tlck = lid_to_tlock(lid);
  973 
  974         /*
  975          * initialize tlock
  976          */
  977         tlck->tid = tid;
  978 
  979         /* bind the tlock and the object */
  980         tlck->flag = tlckINODELOCK;
  981         tlck->ip = ip;
  982         tlck->mp = NULL;
  983 
  984         tlck->type = type;
  985 
  986         /*
  987          * enqueue transaction lock to transaction/inode
  988          */
  989         /* insert the tlock at tail of transaction tlock list */
  990         if (tid) {
  991                 tblk = tid_to_tblock(tid);
  992                 if (tblk->next)
  993                         lid_to_tlock(tblk->last)->next = lid;
  994                 else
  995                         tblk->next = lid;
  996                 tlck->next = 0;
  997                 tblk->last = lid;
  998         }
  999         /* anonymous transaction:
 1000          * insert the tlock at head of inode anonymous tlock list
 1001          */
 1002         else {
 1003                 tlck->next = jfs_ip->atlhead;
 1004                 jfs_ip->atlhead = lid;
 1005                 if (tlck->next == 0) {
 1006                         /* This inode's first anonymous transaction */
 1007                         jfs_ip->atltail = lid;
 1008                         list_add_tail(&jfs_ip->anon_inode_list,
 1009                                       &TxAnchor.anon_list);
 1010                 }
 1011         }
 1012 
 1013         TXN_UNLOCK();
 1014 
 1015         /* initialize type dependent area for maplock */
 1016         maplock = (struct maplock *) & tlck->lock;
 1017         maplock->next = 0;
 1018         maplock->maxcnt = 0;
 1019         maplock->index = 0;
 1020 
 1021         return tlck;
 1022 }
 1023 
 1024 
 1025 /*
 1026  *      txLinelock()
 1027  *
 1028  * function: allocate a transaction lock for log vector list
 1029  */
 1030 struct linelock *txLinelock(struct linelock * tlock)
 1031 {
 1032         lid_t lid;
 1033         struct tlock *tlck;
 1034         struct linelock *linelock;
 1035 
 1036         TXN_LOCK();
 1037 
 1038         /* allocate a TxLock structure */
 1039         lid = txLockAlloc();
 1040         tlck = lid_to_tlock(lid);
 1041 
 1042         TXN_UNLOCK();
 1043 
 1044         /* initialize linelock */
 1045         linelock = (struct linelock *) tlck;
 1046         linelock->next = 0;
 1047         linelock->flag = tlckLINELOCK;
 1048         linelock->maxcnt = TLOCKLONG;
 1049         linelock->index = 0;
 1050 
 1051         /* append linelock after tlock */
 1052         linelock->next = tlock->next;
 1053         tlock->next = lid;
 1054 
 1055         return linelock;
 1056 }
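
/*
 * Illustrative sketch (not part of the original source): when a tlock's
 * in-line lock area fills up (index reaches maxcnt), callers chain an
 * overflow linelock onto it with txLinelock(), in the style of the
 * dtree/xtree code.
 */
#if 0
	if (linelock->index >= linelock->maxcnt)
		linelock = txLinelock(linelock);
#endif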
 1057 
 1058 
 1059 
 1060 /*
 1061  *              transaction commit management
 1062  *              -----------------------------
 1063  */
 1064 
 1065 /*
 1066  * NAME:        txCommit()
 1067  *
 1068  * FUNCTION:    commit the changes to the objects specified in
 1069  *              clist.  For journalled segments only the
 1070  *              changes of the caller are committed, ie by tid.
 1071  *              for non-journalled segments the data are flushed to
 1072  *              disk and then the change to the disk inode and indirect
 1073  *              blocks committed (so blocks newly allocated to the
 1074  *              segment will be made a part of the segment atomically).
 1075  *
 1076  *              all of the segments specified in clist must be in
 1077  *              one file system. no more than 6 segments are needed
 1078  *              to handle all unix svcs.
 1079  *
 1080  *              if the i_nlink field (i.e. disk inode link count)
 1081  *              is zero, and the type of inode is a regular file or
 1082  *              directory, or symbolic link, the inode is truncated
 1083  *              to zero length. the truncation is committed but the
 1084  *              VM resources are unaffected until it is closed (see
 1085  *              iput and iclose).
 1086  *
 1087  * PARAMETER:
 1088  *
 1089  * RETURN:
 1090  *
 1091  * serialization:
 1092  *              on entry the inode lock on each segment is assumed
 1093  *              to be held.
 1094  *
 1095  * i/o error:
 1096  */
 1097 int txCommit(tid_t tid,         /* transaction identifier */
 1098              int nip,           /* number of inodes to commit */
 1099              struct inode **iplist,     /* list of inode to commit */
 1100              int flag)
 1101 {
 1102         int rc = 0, rc1 = 0;
 1103         struct commit cd;
 1104         struct jfs_log *log;
 1105         struct tblock *tblk;
 1106         struct lrd *lrd;
 1107         int lsn;
 1108         struct inode *ip;
 1109         struct jfs_inode_info *jfs_ip;
 1110         int k, n;
 1111         ino_t top;
 1112         struct super_block *sb;
 1113 
 1114         jfs_info("txCommit, tid = %d, flag = %d", tid, flag);
 1115         /* is read-only file system ? */
 1116         if (isReadOnly(iplist[0])) {
 1117                 rc = EROFS;
 1118                 goto TheEnd;
 1119         }
 1120 
 1121         sb = cd.sb = iplist[0]->i_sb;
 1122         cd.tid = tid;
 1123 
 1124         if (tid == 0)
 1125                 tid = txBegin(sb, 0);
 1126         tblk = tid_to_tblock(tid);
 1127 
 1128         /*
 1129          * initialize commit structure
 1130          */
 1131         log = JFS_SBI(sb)->log;
 1132         cd.log = log;
 1133 
 1134         /* initialize log record descriptor in commit */
 1135         lrd = &cd.lrd;
 1136         lrd->logtid = cpu_to_le32(tblk->logtid);
 1137         lrd->backchain = 0;
 1138 
 1139         tblk->xflag |= flag;
 1140 
 1141         if ((flag & (COMMIT_FORCE | COMMIT_SYNC)) == 0)
 1142                 tblk->xflag |= COMMIT_LAZY;
 1143         /*
 1144          *      prepare non-journaled objects for commit
 1145          *
 1146          * flush data pages of non-journaled file
 1147  * to prevent the file from getting uninitialized disk blocks
 1148          * in case of crash.
 1149          * (new blocks - )
 1150          */
 1151         cd.iplist = iplist;
 1152         cd.nip = nip;
 1153 
 1154         /*
 1155          *      acquire transaction lock on (on-disk) inodes
 1156          *
 1157          * update on-disk inode from in-memory inode
 1158          * acquiring transaction locks for AFTER records
 1159          * on the on-disk inode of file object
 1160          *
 1161          * sort the inodes array by inode number in descending order
 1162          * to prevent deadlock when acquiring transaction lock
 1163          * of on-disk inodes on multiple on-disk inode pages by
 1164          * multiple concurrent transactions
 1165          */
 1166         for (k = 0; k < cd.nip; k++) {
 1167                 top = (cd.iplist[k])->i_ino;
 1168                 for (n = k + 1; n < cd.nip; n++) {
 1169                         ip = cd.iplist[n];
 1170                         if (ip->i_ino > top) {
 1171                                 top = ip->i_ino;
 1172                                 cd.iplist[n] = cd.iplist[k];
 1173                                 cd.iplist[k] = ip;
 1174                         }
 1175                 }
 1176 
 1177                 ip = cd.iplist[k];
 1178                 jfs_ip = JFS_IP(ip);
 1179 
 1180                 if (test_and_clear_cflag(COMMIT_Syncdata, ip) &&
 1181                     ((tblk->flag & COMMIT_DELETE) == 0))
 1182                         fsync_inode_data_buffers(ip);
 1183 
 1184                 /*
 1185                  * Mark inode as not dirty.  It will still be on the dirty
 1186                  * inode list, but we'll know not to commit it again unless
 1187                  * it gets marked dirty again
 1188                  */
 1189                 clear_cflag(COMMIT_Dirty, ip);
 1190 
 1191                 /* inherit anonymous tlock(s) of inode */
 1192                 if (jfs_ip->atlhead) {
 1193                         lid_to_tlock(jfs_ip->atltail)->next = tblk->next;
 1194                         tblk->next = jfs_ip->atlhead;
 1195                         if (!tblk->last)
 1196                                 tblk->last = jfs_ip->atltail;
 1197                         jfs_ip->atlhead = jfs_ip->atltail = 0;
 1198                         TXN_LOCK();
 1199                         list_del_init(&jfs_ip->anon_inode_list);
 1200                         TXN_UNLOCK();
 1201                 }
 1202 
 1203                 /*
 1204                  * acquire transaction lock on on-disk inode page
 1205                  * (become first tlock of the tblk's tlock list)
 1206                  */
 1207                 if (((rc = diWrite(tid, ip))))
 1208                         goto out;
 1209         }
 1210 
 1211         /*
 1212          *      write log records from transaction locks
 1213          *
 1214          * txUpdateMap() resets XAD_NEW in XAD.
 1215          */
 1216         if ((rc = txLog(log, tblk, &cd)))
 1217                 goto TheEnd;
 1218 
 1219         /*
 1220          * Ensure that inode isn't reused before
 1221          * lazy commit thread finishes processing
 1222          */
 1223         if (tblk->xflag & (COMMIT_CREATE | COMMIT_DELETE)) {
 1224                 atomic_inc(&tblk->ip->i_count);
 1225                 /*
 1226                  * Avoid a rare deadlock
 1227                  *
 1228                  * If the inode is locked, we may be blocked in
 1229                  * jfs_commit_inode.  If so, we don't want the
 1230                  * lazy_commit thread doing the last iput() on the inode
 1231                  * since that may block on the locked inode.  Instead,
 1232                  * commit the transaction synchronously, so the last iput
 1233                  * will be done by the calling thread (or later)
 1234                  */
 1235                 if (tblk->ip->i_state & I_LOCK)
 1236                         tblk->xflag &= ~COMMIT_LAZY;
 1237         }
 1238 
 1239         ASSERT((!(tblk->xflag & COMMIT_DELETE)) ||
 1240                ((tblk->ip->i_nlink == 0) &&
 1241                 !test_cflag(COMMIT_Nolink, tblk->ip)));
 1242 
 1243         /*
 1244          *      write COMMIT log record
 1245          */
 1246         lrd->type = cpu_to_le16(LOG_COMMIT);
 1247         lrd->length = 0;
 1248         lsn = lmLog(log, tblk, lrd, NULL);
 1249 
 1250         lmGroupCommit(log, tblk);
 1251 
 1252         /*
 1253          *      - transaction is now committed -
 1254          */
 1255 
 1256         /*
 1257          * force pages in careful update
 1258          * (imap addressing structure update)
 1259          */
 1260         if (flag & COMMIT_FORCE)
 1261                 txForce(tblk);
 1262 
 1263         /*
 1264          *      update allocation map.
 1265          *
 1266          * update inode allocation map and inode:
 1267          * free pager lock on memory object of inode if any.
 1268          * update  block allocation map.
 1269          *
 1270          * txUpdateMap() resets XAD_NEW in XAD.
 1271          */
 1272         if (tblk->xflag & COMMIT_FORCE)
 1273                 txUpdateMap(tblk);
 1274 
 1275         /*
 1276          *      free transaction locks and pageout/free pages
 1277          */
 1278         txRelease(tblk);
 1279 
 1280         if ((tblk->flag & tblkGC_LAZY) == 0)
 1281                 txUnlock(tblk);
 1282 
 1283 
 1284         /*
 1285          *      reset in-memory object state
 1286          */
 1287         for (k = 0; k < cd.nip; k++) {
 1288                 ip = cd.iplist[k];
 1289                 jfs_ip = JFS_IP(ip);
 1290 
 1291                 /*
 1292                  * reset in-memory inode state
 1293                  */
 1294                 jfs_ip->bxflag = 0;
 1295                 jfs_ip->blid = 0;
 1296         }
 1297 
 1298       out:
 1299         if (rc != 0)
 1300                 txAbortCommit(&cd, rc);
 1301         else
 1302                 rc = rc1;
 1303 
 1304       TheEnd:
 1305         jfs_info("txCommit: tid = %d, returning %d", tid, rc);
 1306         return rc;
 1307 }
 1308 
 1309 
 1310 /*
 1311  * NAME:        txLog()
 1312  *
 1313  * FUNCTION:    Writes AFTER log records for all lines modified
 1314  *              by tid for segments specified by inodes in comdata.
 1315  *              Code assumes only WRITELOCKS are recorded in lockwords.
 1316  *
 1317  * PARAMETERS:
 1318  *
 1319  * RETURN :
 1320  */
 1321 static int txLog(struct jfs_log * log, struct tblock * tblk, struct commit * cd)
 1322 {
 1323         int rc = 0;
 1324         struct inode *ip;
 1325         lid_t lid;
 1326         struct tlock *tlck;
 1327         struct lrd *lrd = &cd->lrd;
 1328 
 1329         /*
 1330          * write log record(s) for each tlock of transaction,
 1331          */
 1332         for (lid = tblk->next; lid; lid = tlck->next) {
 1333                 tlck = lid_to_tlock(lid);
 1334 
 1335                 tlck->flag |= tlckLOG;
 1336 
 1337                 /* initialize lrd common */
 1338                 ip = tlck->ip;
 1339                 lrd->aggregate = cpu_to_le32(kdev_t_to_nr(ip->i_dev));
 1340                 lrd->log.redopage.fileset = cpu_to_le32(JFS_IP(ip)->fileset);
 1341                 lrd->log.redopage.inode = cpu_to_le32(ip->i_ino);
 1342 
 1343                 if (tlck->mp)
 1344                         hold_metapage(tlck->mp, 0);
 1345 
 1346                 /* write log record of page from the tlock */
 1347                 switch (tlck->type & tlckTYPE) {
 1348                 case tlckXTREE:
 1349                         xtLog(log, tblk, lrd, tlck);
 1350                         break;
 1351 
 1352                 case tlckDTREE:
 1353                         dtLog(log, tblk, lrd, tlck);
 1354                         break;
 1355 
 1356                 case tlckINODE:
 1357                         diLog(log, tblk, lrd, tlck, cd);
 1358                         break;
 1359 
 1360                 case tlckMAP:
 1361                         mapLog(log, tblk, lrd, tlck);
 1362                         break;
 1363 
 1364                 case tlckDATA:
 1365                         dataLog(log, tblk, lrd, tlck);
 1366                         break;
 1367 
 1368                 default:
 1369                         jfs_err("UFO tlock:0x%p", tlck);
 1370                 }
 1371                 if (tlck->mp)
 1372                         release_metapage(tlck->mp);
 1373         }
 1374 
 1375         return rc;
 1376 }
 1377 
 1378 
 1379 /*
 1380  *      diLog()
 1381  *
 1382  * function:    log inode tlock and format maplock to update bmap;
 1383  */
 1384 int diLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
 1385           struct tlock * tlck, struct commit * cd)
 1386 {
 1387         int rc = 0;
 1388         struct metapage *mp;
 1389         pxd_t *pxd;
 1390         struct pxd_lock *pxdlock;
 1391 
 1392         mp = tlck->mp;
 1393 
 1394         /* initialize as REDOPAGE record format */
 1395         lrd->log.redopage.type = cpu_to_le16(LOG_INODE);
 1396         lrd->log.redopage.l2linesize = cpu_to_le16(L2INODESLOTSIZE);
 1397 
 1398         pxd = &lrd->log.redopage.pxd;
 1399 
 1400         /*
 1401          *      inode after image
 1402          */
 1403         if (tlck->type & tlckENTRY) {
 1404                 /* log after-image for logredo(): */
 1405                 lrd->type = cpu_to_le16(LOG_REDOPAGE);
 1406 //              *pxd = mp->cm_pxd;
 1407                 PXDaddress(pxd, mp->index);
 1408                 PXDlength(pxd,
 1409                           mp->logical_size >> tblk->sb->s_blocksize_bits);
 1410                 lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
 1411 
 1412                 /* mark page as homeward bound */
 1413                 tlck->flag |= tlckWRITEPAGE;
 1414         } else if (tlck->type & tlckFREE) {
 1415                 /*
 1416                  *      free inode extent
 1417                  *
 1418                  * (pages of the freed inode extent have been invalidated and
 1419                  * a maplock for free of the extent has been formatted at
 1420                  * txLock() time);
 1421                  *
 1422                  * the tlock had been acquired on the inode allocation map page
 1423                  * (iag) that specifies the freed extent, even though the map
 1424                  * page is not itself logged, to prevent pageout of the map
 1425                  * page before the log;
 1426                  */
 1427                 assert(tlck->type & tlckFREE);
 1428 
 1429                 /* log LOG_NOREDOINOEXT of the freed inode extent for
 1430                  * logredo() to start NoRedoPage filters, and to update
 1431                  * imap and bmap for free of the extent;
 1432                  */
 1433                 lrd->type = cpu_to_le16(LOG_NOREDOINOEXT);
 1434                 /*
 1435                  * For the LOG_NOREDOINOEXT record, we need
 1436                  * to pass the IAG number and inode extent
 1437                  * index (within that IAG) from which the
 1438          * extent is being released.  These have been
 1439                  * passed to us in the iplist[1] and iplist[2].
 1440                  */
 1441                 lrd->log.noredoinoext.iagnum =
 1442                     cpu_to_le32((u32) (size_t) cd->iplist[1]);
 1443                 lrd->log.noredoinoext.inoext_idx =
 1444                     cpu_to_le32((u32) (size_t) cd->iplist[2]);
 1445 
 1446                 pxdlock = (struct pxd_lock *) & tlck->lock;
 1447                 *pxd = pxdlock->pxd;
 1448                 lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));
 1449 
 1450                 /* update bmap */
 1451                 tlck->flag |= tlckUPDATEMAP;
 1452 
 1453                 /* mark page as homeward bound */
 1454                 tlck->flag |= tlckWRITEPAGE;
 1455         } else
 1456                 jfs_err("diLog: UFO type tlck:0x%p", tlck);
 1457 #ifdef  _JFS_WIP
 1458         /*
 1459          *      alloc/free external EA extent
 1460          *
 1461          * a maplock for txUpdateMap() to update bPWMAP for alloc/free
 1462          * of the extent has been formatted at txLock() time;
 1463          */
 1464         else {
 1465                 assert(tlck->type & tlckEA);
 1466 
 1467                 /* log LOG_UPDATEMAP for logredo() to update bmap for
 1468                  * alloc of new (and free of old) external EA extent;
 1469                  */
 1470                 lrd->type = cpu_to_le16(LOG_UPDATEMAP);
 1471                 pxdlock = (struct pxd_lock *) & tlck->lock;
 1472                 nlock = pxdlock->index;
 1473                 for (i = 0; i < nlock; i++, pxdlock++) {
 1474                         if (pxdlock->flag & mlckALLOCPXD)
 1475                                 lrd->log.updatemap.type =
 1476                                     cpu_to_le16(LOG_ALLOCPXD);
 1477                         else
 1478                                 lrd->log.updatemap.type =
 1479                                     cpu_to_le16(LOG_FREEPXD);
 1480                         lrd->log.updatemap.nxd = cpu_to_le16(1);
 1481                         lrd->log.updatemap.pxd = pxdlock->pxd;
 1482                         lrd->backchain =
 1483                             cpu_to_le32(lmLog(log, tblk, lrd, NULL));
 1484                 }
 1485 
 1486                 /* update bmap */
 1487                 tlck->flag |= tlckUPDATEMAP;
 1488         }
 1489 #endif                          /* _JFS_WIP */
 1490 
 1491         return rc;
 1492 }
 1493 
 1494 
 1495 /*
 1496  *      dataLog()
 1497  *
 1498  * function:    log data tlock
 1499  */
 1500 int dataLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
 1501             struct tlock * tlck)
 1502 {
 1503         struct metapage *mp;
 1504         pxd_t *pxd;
 1505 
 1506         mp = tlck->mp;
 1507 
 1508         /* initialize as REDOPAGE record format */
 1509         lrd->log.redopage.type = cpu_to_le16(LOG_DATA);
 1510         lrd->log.redopage.l2linesize = cpu_to_le16(L2DATASLOTSIZE);
 1511 
 1512         pxd = &lrd->log.redopage.pxd;
 1513 
 1514         /* log after-image for logredo(): */
 1515         lrd->type = cpu_to_le16(LOG_REDOPAGE);
 1516 
 1517         if (JFS_IP(tlck->ip)->next_index < MAX_INLINE_DIRTABLE_ENTRY) {
 1518                 /*
 1519                  * The table has been truncated; we must have deleted
 1520                  * the last entry, so don't bother logging this
 1521                  */
 1522                 mp->lid = 0;
 1523                 atomic_dec(&mp->nohomeok);
 1524                 discard_metapage(mp);
 1525                 tlck->mp = 0;
 1526                 return 0;
 1527         }
 1528 
 1529         PXDaddress(pxd, mp->index);
 1530         PXDlength(pxd, mp->logical_size >> tblk->sb->s_blocksize_bits);
 1531 
 1532         lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
 1533 
 1534         /* mark page as homeward bound */
 1535         tlck->flag |= tlckWRITEPAGE;
 1536 
 1537         return 0;
 1538 }
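
/*
 * N.B. editor's note: PXDlength() above stores the page size in
 * file-system blocks, not bytes.  E.g., assuming a 4096-byte metapage
 * and a 1024-byte block size (s_blocksize_bits == 10):
 *
 *      4096 >> 10 == 4
 *
 * so the redo record describes a 4-block physical extent starting at
 * mp->index.
 */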
 1539 
 1540 
 1541 /*
 1542  *      dtLog()
 1543  *
 1544  * function:    log dtree tlock and format maplock to update bmap;
 1545  */
 1546 void dtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
 1547            struct tlock * tlck)
 1548 {
 1549         struct metapage *mp;
 1550         struct pxd_lock *pxdlock;
 1551         pxd_t *pxd;
 1552 
 1553         mp = tlck->mp;
 1554 
 1555         /* initialize as REDOPAGE/NOREDOPAGE record format */
 1556         lrd->log.redopage.type = cpu_to_le16(LOG_DTREE);
 1557         lrd->log.redopage.l2linesize = cpu_to_le16(L2DTSLOTSIZE);
 1558 
 1559         pxd = &lrd->log.redopage.pxd;
 1560 
 1561         if (tlck->type & tlckBTROOT)
 1562                 lrd->log.redopage.type |= cpu_to_le16(LOG_BTROOT);
 1563 
 1564         /*
 1565          *      page extension via relocation: entry insertion;
 1566          *      page extension in-place: entry insertion;
 1567          *      new right page from page split, reinitialized in-line
 1568          *      root from root page split: entry insertion;
 1569          */
 1570         if (tlck->type & (tlckNEW | tlckEXTEND)) {
 1571                 /* log after-image of the new page for logredo():
 1572                  * mark log (LOG_NEW) for logredo() to initialize
 1573                  * freelist and update bmap for alloc of the new page;
 1574                  */
 1575                 lrd->type = cpu_to_le16(LOG_REDOPAGE);
 1576                 if (tlck->type & tlckEXTEND)
 1577                         lrd->log.redopage.type |= cpu_to_le16(LOG_EXTEND);
 1578                 else
 1579                         lrd->log.redopage.type |= cpu_to_le16(LOG_NEW);
 1580 //              *pxd = mp->cm_pxd;
 1581                 PXDaddress(pxd, mp->index);
 1582                 PXDlength(pxd,
 1583                           mp->logical_size >> tblk->sb->s_blocksize_bits);
 1584                 lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
 1585 
 1586                 /* format a maplock for txUpdateMap() to update bPMAP for
 1587                  * alloc of the new page;
 1588                  */
 1589                 if (tlck->type & tlckBTROOT)
 1590                         return;
 1591                 tlck->flag |= tlckUPDATEMAP;
 1592                 pxdlock = (struct pxd_lock *) & tlck->lock;
 1593                 pxdlock->flag = mlckALLOCPXD;
 1594                 pxdlock->pxd = *pxd;
 1595 
 1596                 pxdlock->index = 1;
 1597 
 1598                 /* mark page as homeward bound */
 1599                 tlck->flag |= tlckWRITEPAGE;
 1600                 return;
 1601         }
 1602 
 1603         /*
 1604          *      entry insertion/deletion,
 1605          *      sibling page link update (old right page before split);
 1606          */
 1607         if (tlck->type & (tlckENTRY | tlckRELINK)) {
 1608                 /* log after-image for logredo(): */
 1609                 lrd->type = cpu_to_le16(LOG_REDOPAGE);
 1610                 PXDaddress(pxd, mp->index);
 1611                 PXDlength(pxd,
 1612                           mp->logical_size >> tblk->sb->s_blocksize_bits);
 1613                 lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
 1614 
 1615                 /* mark page as homeward bound */
 1616                 tlck->flag |= tlckWRITEPAGE;
 1617                 return;
 1618         }
 1619 
 1620         /*
 1621          *      page deletion: page has been invalidated
 1622          *      page relocation: source extent
 1623          *
 1624          *      a maplock for free of the page has been formatted
 1625          *      at txLock() time;
 1626          */
 1627         if (tlck->type & (tlckFREE | tlckRELOCATE)) {
 1628                 /* log LOG_NOREDOPAGE of the deleted page for logredo()
 1629                  * to start NoRedoPage filter and to update bmap for free
 1630                  * of the deleted page
 1631                  */
 1632                 lrd->type = cpu_to_le16(LOG_NOREDOPAGE);
 1633                 pxdlock = (struct pxd_lock *) & tlck->lock;
 1634                 *pxd = pxdlock->pxd;
 1635                 lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));
 1636 
 1637                 /* a maplock for txUpdateMap() for free of the page
 1638                  * has been formatted at txLock() time;
 1639                  */
 1640                 tlck->flag |= tlckUPDATEMAP;
 1641         }
 1642         return;
 1643 }
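
/*
 * N.B. editor's note: in the (tlckNEW | tlckEXTEND) branch above, a
 * btree root (tlckBTROOT) returns before formatting a maplock because
 * the in-line root lives inside the inode and owns no disk extent of
 * its own, so there is no bmap update to schedule for it.
 */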
 1644 
 1645 
 1646 /*
 1647  *      xtLog()
 1648  *
 1649  * function:    log xtree tlock and format maplock to update bmap;
 1650  */
 1651 void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
 1652            struct tlock * tlck)
 1653 {
 1654         struct inode *ip;
 1655         struct metapage *mp;
 1656         xtpage_t *p;
 1657         struct xtlock *xtlck;
 1658         struct maplock *maplock;
 1659         struct xdlistlock *xadlock;
 1660         struct pxd_lock *pxdlock;
 1661         pxd_t *pxd;
 1662         int next, lwm, hwm;
 1663 
 1664         ip = tlck->ip;
 1665         mp = tlck->mp;
 1666 
 1667         /* initialize as REDOPAGE/NOREDOPAGE record format */
 1668         lrd->log.redopage.type = cpu_to_le16(LOG_XTREE);
 1669         lrd->log.redopage.l2linesize = cpu_to_le16(L2XTSLOTSIZE);
 1670 
 1671         pxd = &lrd->log.redopage.pxd;
 1672 
 1673         if (tlck->type & tlckBTROOT) {
 1674                 lrd->log.redopage.type |= cpu_to_le16(LOG_BTROOT);
 1675                 p = &JFS_IP(ip)->i_xtroot;
 1676                 if (S_ISDIR(ip->i_mode))
 1677                         lrd->log.redopage.type |=
 1678                             cpu_to_le16(LOG_DIR_XTREE);
 1679         } else
 1680                 p = (xtpage_t *) mp->data;
 1681         next = le16_to_cpu(p->header.nextindex);
 1682 
 1683         xtlck = (struct xtlock *) & tlck->lock;
 1684 
 1685         maplock = (struct maplock *) & tlck->lock;
 1686         xadlock = (struct xdlistlock *) maplock;
 1687 
 1688         /*
 1689          *      entry insertion/extension;
 1690          *      sibling page link update (old right page before split);
 1691          */
 1692         if (tlck->type & (tlckNEW | tlckGROW | tlckRELINK)) {
 1693                 /* log after-image for logredo():
 1694                  * logredo() will update bmap for alloc of new/extended
 1695                  * extents (XAD_NEW|XAD_EXTEND) of XAD[lwm:next) from
 1696                  * after-image of XADlist;
 1697                  * logredo() resets (XAD_NEW|XAD_EXTEND) flag when
 1698                  * applying the after-image to the meta-data page.
 1699                  */
 1700                 lrd->type = cpu_to_le16(LOG_REDOPAGE);
 1701 //              *pxd = mp->cm_pxd;
 1702                 PXDaddress(pxd, mp->index);
 1703                 PXDlength(pxd,
 1704                           mp->logical_size >> tblk->sb->s_blocksize_bits);
 1705                 lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
 1706 
 1707                 /* format a maplock for txUpdateMap() to update bPMAP
 1708                  * for alloc of new/extended extents of XAD[lwm:next)
 1709                  * from the page itself;
 1710                  * txUpdateMap() resets (XAD_NEW|XAD_EXTEND) flag.
 1711                  */
 1712                 lwm = xtlck->lwm.offset;
 1713                 if (lwm == 0)
 1714                         lwm = XTPAGEMAXSLOT;
 1715 
 1716                 if (lwm == next)
 1717                         goto out;
 1718                 assert(lwm < next);
 1719                 tlck->flag |= tlckUPDATEMAP;
 1720                 xadlock->flag = mlckALLOCXADLIST;
 1721                 xadlock->count = next - lwm;
 1722                 if ((xadlock->count <= 2) && (tblk->xflag & COMMIT_LAZY)) {
 1723                         int i;
 1724                         /*
 1725                          * Lazy commit may allow xtree to be modified before
 1726                          * txUpdateMap runs.  Copy xad into linelock to
 1727                          * preserve correct data.
 1728                          */
 1729                         xadlock->xdlist = &xtlck->pxdlock;
 1730                         memcpy(xadlock->xdlist, &p->xad[lwm],
 1731                                sizeof(xad_t) * xadlock->count);
 1732 
 1733                         for (i = 0; i < xadlock->count; i++)
 1734                                 p->xad[lwm + i].flag &=
 1735                                     ~(XAD_NEW | XAD_EXTENDED);
 1736                 } else {
 1737                         /*
 1738                          * xdlist will point into the inode's xtree; ensure
 1739                          * that the transaction is not committed lazily.
 1740                          */
 1741                         xadlock->xdlist = &p->xad[lwm];
 1742                         tblk->xflag &= ~COMMIT_LAZY;
 1743                 }
 1744                 jfs_info("xtLog: alloc ip:0x%p mp:0x%p tlck:0x%p lwm:%d "
 1745                          "count:%d", tlck->ip, mp, tlck, lwm, xadlock->count);
 1746 
 1747                 maplock->index = 1;
 1748 
 1749               out:
 1750                 /* mark page as homeward bound */
 1751                 tlck->flag |= tlckWRITEPAGE;
 1752 
 1753                 return;
 1754         }
 1755 
 1756         /*
 1757          *      page deletion: file deletion/truncation (ref. xtTruncate())
 1758          *
 1759          * (page will be invalidated after log is written and bmap
 1760          * is updated from the page);
 1761          */
 1762         if (tlck->type & tlckFREE) {
 1763                 /* LOG_NOREDOPAGE log for NoRedoPage filter:
 1764                  * if page free from file delete, NoRedoFile filter from
 1765                  * inode image of zero link count will subsume NoRedoPage
 1766                  * filters for each page;
 1767                  * if page free from file truncation, write NoRedoPage
 1768                  * filter;
 1769                  *
 1770                  * update of block allocation map for the page itself:
 1771                  * if page free from deletion and truncation, LOG_UPDATEMAP
 1772                  * log for the page itself is generated from processing
 1773                  * its parent page xad entries;
 1774                  */
 1775                 /* if page free from file truncation, log LOG_NOREDOPAGE
 1776                  * of the deleted page for logredo() to start NoRedoPage
 1777                  * filter for the page;
 1778                  */
 1779                 if (tblk->xflag & COMMIT_TRUNCATE) {
 1780                         /* write NOREDOPAGE for the page */
 1781                         lrd->type = cpu_to_le16(LOG_NOREDOPAGE);
 1782                         PXDaddress(pxd, mp->index);
 1783                         PXDlength(pxd,
 1784                                   mp->logical_size >> tblk->sb->
 1785                                   s_blocksize_bits);
 1786                         lrd->backchain =
 1787                             cpu_to_le32(lmLog(log, tblk, lrd, NULL));
 1788 
 1789                         if (tlck->type & tlckBTROOT) {
 1790                                 /* Empty xtree must be logged */
 1791                                 lrd->type = cpu_to_le16(LOG_REDOPAGE);
 1792                                 lrd->backchain =
 1793                                     cpu_to_le32(lmLog(log, tblk, lrd, tlck));
 1794                         }
 1795                 }
 1796 
 1797                 /* init LOG_UPDATEMAP of the freed extents
 1798                  * XAD[XTENTRYSTART:hwm) from the deleted page itself
 1799                  * for logredo() to update bmap;
 1800                  */
 1801                 lrd->type = cpu_to_le16(LOG_UPDATEMAP);
 1802                 lrd->log.updatemap.type = cpu_to_le16(LOG_FREEXADLIST);
 1803                 xtlck = (struct xtlock *) & tlck->lock;
 1804                 hwm = xtlck->hwm.offset;
 1805                 lrd->log.updatemap.nxd =
 1806                     cpu_to_le16(hwm - XTENTRYSTART + 1);
 1807                 /* reformat linelock for lmLog() */
 1808                 xtlck->header.offset = XTENTRYSTART;
 1809                 xtlck->header.length = hwm - XTENTRYSTART + 1;
 1810                 xtlck->index = 1;
 1811                 lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
 1812 
 1813                 /* format a maplock for txUpdateMap() to update bmap
 1814                  * to free extents of XAD[XTENTRYSTART:hwm) from the
 1815                  * deleted page itself;
 1816                  */
 1817                 tlck->flag |= tlckUPDATEMAP;
 1818                 xadlock->flag = mlckFREEXADLIST;
 1819                 xadlock->count = hwm - XTENTRYSTART + 1;
 1820                 if ((xadlock->count <= 2) && (tblk->xflag & COMMIT_LAZY)) {
 1821                         /*
 1822                          * Lazy commit may allow xtree to be modified before
 1823                          * txUpdateMap runs.  Copy xad into linelock to
 1824                          * preserve correct data.
 1825                          */
 1826                         xadlock->xdlist = &xtlck->pxdlock;
 1827                         memcpy(xadlock->xdlist, &p->xad[XTENTRYSTART],
 1828                                sizeof(xad_t) * xadlock->count);
 1829                 } else {
 1830                         /*
 1831                          * xdlist will point into the inode's xtree; ensure
 1832                          * that the transaction is not committed lazily.
 1833                          */
 1834                         xadlock->xdlist = &p->xad[XTENTRYSTART];
 1835                         tblk->xflag &= ~COMMIT_LAZY;
 1836                 }
 1837                 jfs_info("xtLog: free ip:0x%p mp:0x%p count:%d lwm:2",
 1838                          tlck->ip, mp, xadlock->count);
 1839 
 1840                 maplock->index = 1;
 1841 
 1842                 /* mark page as invalid */
 1843                 if (((tblk->xflag & COMMIT_PWMAP) || S_ISDIR(ip->i_mode))
 1844                     && !(tlck->type & tlckBTROOT))
 1845                         tlck->flag |= tlckFREEPAGE;
 1846                 /*
 1847                    else (tblk->xflag & COMMIT_PMAP)
 1848                    ? release the page;
 1849                  */
 1850                 return;
 1851         }
 1852 
 1853         /*
 1854          *      page/entry truncation: file truncation (ref. xtTruncate())
 1855          *
 1856          *     |----------+------+------+---------------|
 1857          *                |      |      |
 1858          *                |      |     hwm - hwm before truncation
 1859          *                |     next - truncation point
 1860          *               lwm - lwm before truncation
 1861          * header ?
 1862          */
 1863         if (tlck->type & tlckTRUNCATE) {
 1864                 pxd_t tpxd;     /* truncated extent of xad */
 1865                 int twm;
 1866 
 1867                 /*
 1868                  * For truncation the entire linelock may be used, so it would
 1869                  * be difficult to store xad list in linelock itself.
 1870                  * Therefore, we'll just force transaction to be committed
 1871                  * synchronously, so that xtree pages won't be changed before
 1872                  * txUpdateMap runs.
 1873                  */
 1874                 tblk->xflag &= ~COMMIT_LAZY;
 1875                 lwm = xtlck->lwm.offset;
 1876                 if (lwm == 0)
 1877                         lwm = XTPAGEMAXSLOT;
 1878                 hwm = xtlck->hwm.offset;
 1879                 twm = xtlck->twm.offset;
 1880 
 1881                 /*
 1882                  *      write log records
 1883                  */
 1884                 /*
 1885                  * allocate entries XAD[lwm:next]:
 1886                  */
 1887                 if (lwm < next) {
 1888                         /* log after-image for logredo():
 1889                          * logredo() will update bmap for alloc of new/extended
 1890                          * extents (XAD_NEW|XAD_EXTEND) of XAD[lwm:next) from
 1891                          * after-image of XADlist;
 1892                          * logredo() resets (XAD_NEW|XAD_EXTEND) flag when
 1893                          * applying the after-image to the meta-data page.
 1894                          */
 1895                         lrd->type = cpu_to_le16(LOG_REDOPAGE);
 1896                         PXDaddress(pxd, mp->index);
 1897                         PXDlength(pxd,
 1898                                   mp->logical_size >> tblk->sb->
 1899                                   s_blocksize_bits);
 1900                         lrd->backchain =
 1901                             cpu_to_le32(lmLog(log, tblk, lrd, tlck));
 1902                 }
 1903 
 1904                 /*
 1905                  * truncate entry XAD[twm == next - 1]:
 1906                  */
 1907                 if (twm == next - 1) {
 1908                         /* init LOG_UPDATEMAP for logredo() to update bmap for
 1909                          * free of truncated delta extent of the truncated
 1910                          * entry XAD[next - 1]:
 1911                          * (xtlck->pxdlock = truncated delta extent);
 1912                          */
 1913                         pxdlock = (struct pxd_lock *) & xtlck->pxdlock;
 1914                         /* assert(pxdlock->type & tlckTRUNCATE); */
 1915                         lrd->type = cpu_to_le16(LOG_UPDATEMAP);
 1916                         lrd->log.updatemap.type = cpu_to_le16(LOG_FREEPXD);
 1917                         lrd->log.updatemap.nxd = cpu_to_le16(1);
 1918                         lrd->log.updatemap.pxd = pxdlock->pxd;
 1919                         tpxd = pxdlock->pxd;    /* save to format maplock */
 1920                         lrd->backchain =
 1921                             cpu_to_le32(lmLog(log, tblk, lrd, NULL));
 1922                 }
 1923 
 1924                 /*
 1925                  * free entries XAD[next:hwm]:
 1926                  */
 1927                 if (hwm >= next) {
 1928                         /* init LOG_UPDATEMAP of the freed extents
 1929                          * XAD[next:hwm] from the deleted page itself
 1930                          * for logredo() to update bmap;
 1931                          */
 1932                         lrd->type = cpu_to_le16(LOG_UPDATEMAP);
 1933                         lrd->log.updatemap.type =
 1934                             cpu_to_le16(LOG_FREEXADLIST);
 1935                         xtlck = (struct xtlock *) & tlck->lock;
 1936                         hwm = xtlck->hwm.offset;
 1937                         lrd->log.updatemap.nxd =
 1938                             cpu_to_le16(hwm - next + 1);
 1939                         /* reformat linelock for lmLog() */
 1940                         xtlck->header.offset = next;
 1941                         xtlck->header.length = hwm - next + 1;
 1942                         xtlck->index = 1;
 1943                         lrd->backchain =
 1944                             cpu_to_le32(lmLog(log, tblk, lrd, tlck));
 1945                 }
 1946 
 1947                 /*
 1948                  *      format maplock(s) for txUpdateMap() to update bmap
 1949                  */
 1950                 maplock->index = 0;
 1951 
 1952                 /*
 1953                  * allocate entries XAD[lwm:next):
 1954                  */
 1955                 if (lwm < next) {
 1956                         /* format a maplock for txUpdateMap() to update bPMAP
 1957                          * for alloc of new/extended extents of XAD[lwm:next)
 1958                          * from the page itself;
 1959                          * txUpdateMap() resets (XAD_NEW|XAD_EXTEND) flag.
 1960                          */
 1961                         tlck->flag |= tlckUPDATEMAP;
 1962                         xadlock->flag = mlckALLOCXADLIST;
 1963                         xadlock->count = next - lwm;
 1964                         xadlock->xdlist = &p->xad[lwm];
 1965 
 1966                         jfs_info("xtLog: alloc ip:0x%p mp:0x%p count:%d "
 1967                                  "lwm:%d next:%d",
 1968                                  tlck->ip, mp, xadlock->count, lwm, next);
 1969                         maplock->index++;
 1970                         xadlock++;
 1971                 }
 1972 
 1973                 /*
 1974                  * truncate entry XAD[twm == next - 1]:
 1975                  */
 1976                 if (twm == next - 1) {
 1977                         struct pxd_lock *pxdlock;
 1978 
 1979                         /* format a maplock for txUpdateMap() to update bmap
 1980                          * to free truncated delta extent of the truncated
 1981                          * entry XAD[next - 1];
 1982                          * (xtlck->pxdlock = truncated delta extent);
 1983                          */
 1984                         tlck->flag |= tlckUPDATEMAP;
 1985                         pxdlock = (struct pxd_lock *) xadlock;
 1986                         pxdlock->flag = mlckFREEPXD;
 1987                         pxdlock->count = 1;
 1988                         pxdlock->pxd = tpxd;
 1989 
 1990                         jfs_info("xtLog: truncate ip:0x%p mp:0x%p count:%d "
 1991                                  "hwm:%d", ip, mp, pxdlock->count, hwm);
 1992                         maplock->index++;
 1993                         xadlock++;
 1994                 }
 1995 
 1996                 /*
 1997                  * free entries XAD[next:hwm]:
 1998                  */
 1999                 if (hwm >= next) {
 2000                         /* format a maplock for txUpdateMap() to update bmap
 2001                  * to free extents of XAD[next:hwm] from the deleted
 2002                          * page itself;
 2003                          */
 2004                         tlck->flag |= tlckUPDATEMAP;
 2005                         xadlock->flag = mlckFREEXADLIST;
 2006                         xadlock->count = hwm - next + 1;
 2007                         xadlock->xdlist = &p->xad[next];
 2008 
 2009                         jfs_info("xtLog: free ip:0x%p mp:0x%p count:%d "
 2010                                  "next:%d hwm:%d",
 2011                                  tlck->ip, mp, xadlock->count, next, hwm);
 2012                         maplock->index++;
 2013                 }
 2014 
 2015                 /* mark page as homeward bound */
 2016                 tlck->flag |= tlckWRITEPAGE;
 2017         }
 2018         return;
 2019 }
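
/*
 * N.B. editor's worked example for the tlckTRUNCATE case above, with
 * illustrative slot numbers: suppose hwm == 10 before truncation, the
 * truncation point leaves next == 6, the surviving entry XAD[5] was
 * shortened in place (twm == next - 1 == 5), and XAD[4] is newly
 * allocated (lwm == 4).  xtLog() then emits, in order:
 *
 *      LOG_REDOPAGE   after-image of XAD[4:6)           (lwm < next)
 *      LOG_UPDATEMAP  LOG_FREEPXD of the delta extent   (twm == next - 1)
 *      LOG_UPDATEMAP  LOG_FREEXADLIST of XAD[6:10]      (hwm >= next)
 *
 * and then formats one maplock entry per case for txUpdateMap().
 */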
 2020 
 2021 
 2022 /*
 2023  *      mapLog()
 2024  *
 2025  * function:    log from maplock of freed data extents;
 2026  */
 2027 void mapLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
 2028             struct tlock * tlck)
 2029 {
 2030         struct pxd_lock *pxdlock;
 2031         int i, nlock;
 2032         pxd_t *pxd;
 2033 
 2034         /*
 2035          *      page relocation: free the source page extent
 2036          *
 2037          * a maplock for txUpdateMap() for free of the page
 2038          * has been formatted at txLock() time saving the src
 2039          * relocated page address;
 2040          */
 2041         if (tlck->type & tlckRELOCATE) {
 2042                 /* log LOG_NOREDOPAGE of the old relocated page
 2043                  * for logredo() to start NoRedoPage filter;
 2044                  */
 2045                 lrd->type = cpu_to_le16(LOG_NOREDOPAGE);
 2046                 pxdlock = (struct pxd_lock *) & tlck->lock;
 2047                 pxd = &lrd->log.redopage.pxd;
 2048                 *pxd = pxdlock->pxd;
 2049                 lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));
 2050 
 2051                 /* (N.B. currently, logredo() does NOT update bmap
 2052                  * for free of the page itself for (LOG_XTREE|LOG_NOREDOPAGE);
 2053                  * if page free from relocation, LOG_UPDATEMAP log is
 2054                  * specifically generated now for logredo()
 2055                  * to update bmap for free of src relocated page;
 2056                  * (new flag LOG_RELOCATE may be introduced which will
 2057                  * inform logredo() to start NORedoPage filter and also
 2058                  * update block allocation map at the same time, thus
 2059                  * avoiding an extra log write);
 2060                  */
 2061                 lrd->type = cpu_to_le16(LOG_UPDATEMAP);
 2062                 lrd->log.updatemap.type = cpu_to_le16(LOG_FREEPXD);
 2063                 lrd->log.updatemap.nxd = cpu_to_le16(1);
 2064                 lrd->log.updatemap.pxd = pxdlock->pxd;
 2065                 lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));
 2066 
 2067                 /* a maplock for txUpdateMap() for free of the page
 2068                  * has been formatted at txLock() time;
 2069                  */
 2070                 tlck->flag |= tlckUPDATEMAP;
 2071                 return;
 2072         }
 2073         /*
 2074          *
 2075          * Otherwise it's not a relocate request
 2076          *
 2077          */
 2078         else {
 2079                 /* log LOG_UPDATEMAP for logredo() to update bmap for
 2080                  * free of truncated/relocated delta extent of the data;
 2081                  * e.g.: external EA extent, relocated/truncated extent
 2082                  * from xtTailgate();
 2083                  */
 2084                 lrd->type = cpu_to_le16(LOG_UPDATEMAP);
 2085                 pxdlock = (struct pxd_lock *) & tlck->lock;
 2086                 nlock = pxdlock->index;
 2087                 for (i = 0; i < nlock; i++, pxdlock++) {
 2088                         if (pxdlock->flag & mlckALLOCPXD)
 2089                                 lrd->log.updatemap.type =
 2090                                     cpu_to_le16(LOG_ALLOCPXD);
 2091                         else
 2092                                 lrd->log.updatemap.type =
 2093                                     cpu_to_le16(LOG_FREEPXD);
 2094                         lrd->log.updatemap.nxd = cpu_to_le16(1);
 2095                         lrd->log.updatemap.pxd = pxdlock->pxd;
 2096                         lrd->backchain =
 2097                             cpu_to_le32(lmLog(log, tblk, lrd, NULL));
 2098                         jfs_info("mapLog: xaddr:0x%lx xlen:0x%x",
 2099                                  (ulong) addressPXD(&pxdlock->pxd),
 2100                                  lengthPXD(&pxdlock->pxd));
 2101                 }
 2102 
 2103                 /* update bmap */
 2104                 tlck->flag |= tlckUPDATEMAP;
 2105         }
 2106 }
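
/*
 * N.B. editor's note: each pxd_lock entry in the non-relocate branch
 * above carries its own mlckALLOCPXD/mlckFREEPXD flag, so one tlock can
 * mix allocations and frees; the loop writes one LOG_UPDATEMAP record
 * per entry, all chained through lrd->backchain.
 */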
 2107 
 2108 
 2109 /*
 2110  *      txEA()
 2111  *
 2112  * function:    acquire maplock for EA/ACL extents or
 2113  *              set COMMIT_INLINE flag;
 2114  */
 2115 void txEA(tid_t tid, struct inode *ip, dxd_t * oldea, dxd_t * newea)
 2116 {
 2117         struct tlock *tlck = NULL;
 2118         struct pxd_lock *maplock = NULL, *pxdlock = NULL;
 2119 
 2120         /*
 2121          * format maplock for alloc of new EA extent
 2122          */
 2123         if (newea) {
 2124                 /* Since the newea could be a completely zeroed entry, we need
 2125                  * to check for the two flags which indicate we should actually
 2126                  * commit new EA data
 2127                  */
 2128                 if (newea->flag & DXD_EXTENT) {
 2129                         tlck = txMaplock(tid, ip, tlckMAP);
 2130                         maplock = (struct pxd_lock *) & tlck->lock;
 2131                         pxdlock = (struct pxd_lock *) maplock;
 2132                         pxdlock->flag = mlckALLOCPXD;
 2133                         PXDaddress(&pxdlock->pxd, addressDXD(newea));
 2134                         PXDlength(&pxdlock->pxd, lengthDXD(newea));
 2135                         pxdlock++;
 2136                         maplock->index = 1;
 2137                 } else if (newea->flag & DXD_INLINE) {
 2138                         tlck = NULL;
 2139 
 2140                         set_cflag(COMMIT_Inlineea, ip);
 2141                 }
 2142         }
 2143 
 2144         /*
 2145          * format maplock for free of old EA extent
 2146          */
 2147         if (!test_cflag(COMMIT_Nolink, ip) && oldea->flag & DXD_EXTENT) {
 2148                 if (tlck == NULL) {
 2149                         tlck = txMaplock(tid, ip, tlckMAP);
 2150                         maplock = (struct pxd_lock *) & tlck->lock;
 2151                         pxdlock = (struct pxd_lock *) maplock;
 2152                         maplock->index = 0;
 2153                 }
 2154                 pxdlock->flag = mlckFREEPXD;
 2155                 PXDaddress(&pxdlock->pxd, addressDXD(oldea));
 2156                 PXDlength(&pxdlock->pxd, lengthDXD(oldea));
 2157                 maplock->index++;
 2158         }
 2159 }
 2160 
 2161 
 2162 /*
 2163  *      txForce()
 2164  *
 2165  * function: synchronously write pages locked by transaction
 2166  *              after txLog() but before txUpdateMap();
 2167  */
 2168 void txForce(struct tblock * tblk)
 2169 {
 2170         struct tlock *tlck;
 2171         lid_t lid, next;
 2172         struct metapage *mp;
 2173 
 2174         /*
 2175          * reverse the order of transaction tlocks to follow the
 2176          * careful update order of address index pages
 2177          * (right to left, bottom up)
 2178          */
 2179         tlck = lid_to_tlock(tblk->next);
 2180         lid = tlck->next;
 2181         tlck->next = 0;
 2182         while (lid) {
 2183                 tlck = lid_to_tlock(lid);
 2184                 next = tlck->next;
 2185                 tlck->next = tblk->next;
 2186                 tblk->next = lid;
 2187                 lid = next;
 2188         }
 2189 
 2190         /*
 2191          * synchronously write the page, and
 2192          * hold the page for txUpdateMap();
 2193          */
 2194         for (lid = tblk->next; lid; lid = next) {
 2195                 tlck = lid_to_tlock(lid);
 2196                 next = tlck->next;
 2197 
 2198                 if ((mp = tlck->mp) != NULL &&
 2199                     (tlck->type & tlckBTROOT) == 0) {
 2200                         assert(mp->xflag & COMMIT_PAGE);
 2201 
 2202                         if (tlck->flag & tlckWRITEPAGE) {
 2203                                 tlck->flag &= ~tlckWRITEPAGE;
 2204 
 2205                                 /* do not release page to freelist */
 2206 
 2207                                 /*
 2208                                  * The "right" thing to do here is to
 2209                                  * synchronously write the metadata.
 2210                                  * With the current implementation this
 2211                                  * is hard since write_metapage requires
 2212                                  * us to kunmap & remap the page.  If we
 2213                                  * have tlocks pointing into the metadata
 2214                                  * pages, we don't want to do this.  I think
 2215                                  * we can get by with synchronously writing
 2216                                  * the pages when they are released.
 2217                                  */
 2218                                 assert(atomic_read(&mp->nohomeok));
 2219                                 set_bit(META_dirty, &mp->flag);
 2220                                 set_bit(META_sync, &mp->flag);
 2221                         }
 2222                 }
 2223         }
 2224 }
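
/*
 * N.B. editor's note: the first loop in txForce() is the classic
 * iterative reversal of a singly-linked list, expressed over lid_t
 * indices with tblk->next as the new head.  The same idiom over
 * pointers (hypothetical struct node, for illustration only):
 *
 *      struct node { struct node *next; };
 *
 *      struct node *reverse(struct node *head)
 *      {
 *              struct node *prev = NULL, *next;
 *
 *              while (head) {
 *                      next = head->next;
 *                      head->next = prev;
 *                      prev = head;
 *                      head = next;
 *              }
 *              return prev;
 *      }
 */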
 2225 
 2226 
 2227 /*
 2228  *      txUpdateMap()
 2229  *
 2230  * function:    update persistent allocation map (and working map
 2231  *              if appropriate);
 2232  *
 2233  * parameter:
 2234  */
 2235 static void txUpdateMap(struct tblock * tblk)
 2236 {
 2237         struct inode *ip;
 2238         struct inode *ipimap;
 2239         lid_t lid;
 2240         struct tlock *tlck;
 2241         struct maplock *maplock;
 2242         struct pxd_lock pxdlock;
 2243         int maptype;
 2244         int k, nlock;
 2245         struct metapage *mp = 0;
 2246 
 2247         ipimap = JFS_SBI(tblk->sb)->ipimap;
 2248 
 2249         maptype = (tblk->xflag & COMMIT_PMAP) ? COMMIT_PMAP : COMMIT_PWMAP;
 2250 
 2251 
 2252         /*
 2253          *      update block allocation map
 2254          *
 2255          * update allocation state in pmap (and wmap) and
 2256          * update lsn of the pmap page;
 2257          */
 2258         /*
 2259          * scan each tlock/page of transaction for block allocation/free:
 2260          *
 2261          * for each tlock/page of transaction, update map.
 2262          *  ? are there tlock for pmap and pwmap at the same time ?
 2263          */
 2264         for (lid = tblk->next; lid; lid = tlck->next) {
 2265                 tlck = lid_to_tlock(lid);
 2266 
 2267                 if ((tlck->flag & tlckUPDATEMAP) == 0)
 2268                         continue;
 2269 
 2270                 if (tlck->flag & tlckFREEPAGE) {
 2271                         /*
 2272                          * Another thread may attempt to reuse freed space
 2273                          * immediately, so we want to get rid of the metapage
 2274                          * before anyone else has a chance to get it.
 2275                          * Lock metapage, update maps, then invalidate
 2276                          * the metapage.
 2277                          */
 2278                         mp = tlck->mp;
 2279                         ASSERT(mp->xflag & COMMIT_PAGE);
 2280                         hold_metapage(mp, 0);
 2281                 }
 2282 
 2283                 /*
 2284                  * extent list:
 2285                  * . in-line PXD list:
 2286                  * . out-of-line XAD list:
 2287                  */
 2288                 maplock = (struct maplock *) & tlck->lock;
 2289                 nlock = maplock->index;
 2290 
 2291                 for (k = 0; k < nlock; k++, maplock++) {
 2292                         /*
 2293                          * allocate blocks in persistent map:
 2294                          *
 2295                          * blocks have been allocated from wmap at alloc time;
 2296                          */
 2297                         if (maplock->flag & mlckALLOC) {
 2298                                 txAllocPMap(ipimap, maplock, tblk);
 2299                         }
 2300                         /*
 2301                          * free blocks in persistent and working map:
 2302                          * blocks will be freed in pmap and then in wmap;
 2303                          *
 2304                          * ? tblock specifies the PMAP/PWMAP based upon
 2305                          * transaction
 2306                          *
 2307                          * free blocks in persistent map:
 2308                          * blocks will be freed from wmap at last reference
 2309                          * release of the object for regular files;
 2310                          *
 2311                          * Always free blocks from both persistent & working
 2312                          * maps for directories
 2313                          */
 2314                         else {  /* (maplock->flag & mlckFREE) */
 2315 
 2316                                 if (S_ISDIR(tlck->ip->i_mode))
 2317                                         txFreeMap(ipimap, maplock,
 2318                                                   tblk, COMMIT_PWMAP);
 2319                                 else
 2320                                         txFreeMap(ipimap, maplock,
 2321                                                   tblk, maptype);
 2322                         }
 2323                 }
 2324                 if (tlck->flag & tlckFREEPAGE) {
 2325                         if (!(tblk->flag & tblkGC_LAZY)) {
 2326                                 /* This is equivalent to txRelease */
 2327                                 ASSERT(mp->lid == lid);
 2328                                 tlck->mp->lid = 0;
 2329                         }
 2330                         assert(atomic_read(&mp->nohomeok) == 1);
 2331                         atomic_dec(&mp->nohomeok);
 2332                         discard_metapage(mp);
 2333                         tlck->mp = 0;
 2334                 }
 2335         }
 2336         /*
 2337          *      update inode allocation map
 2338          *
 2339          * update allocation state in pmap and
 2340          * update lsn of the pmap page;
 2341          * update in-memory inode flag/state
 2342          *
 2343          * unlock mapper/write lock
 2344          */
 2345         if (tblk->xflag & COMMIT_CREATE) {
 2346                 ip = tblk->ip;
 2347 
 2348                 ASSERT(test_cflag(COMMIT_New, ip));
 2349                 clear_cflag(COMMIT_New, ip);
 2350 
 2351                 diUpdatePMap(ipimap, ip->i_ino, FALSE, tblk);
 2352                 ipimap->i_state |= I_DIRTY;
 2353                 /* update persistent block allocation map
 2354                  * for the allocation of inode extent;
 2355                  */
 2356                 pxdlock.flag = mlckALLOCPXD;
 2357                 pxdlock.pxd = JFS_IP(ip)->ixpxd;
 2358                 pxdlock.index = 1;
 2359                 txAllocPMap(ip, (struct maplock *) & pxdlock, tblk);
 2360                 iput(ip);
 2361         } else if (tblk->xflag & COMMIT_DELETE) {
 2362                 ip = tblk->ip;
 2363                 diUpdatePMap(ipimap, ip->i_ino, TRUE, tblk);
 2364                 ipimap->i_state |= I_DIRTY;
 2365                 iput(ip);
 2366         }
 2367 }
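
/*
 * N.B. editor's note: the maptype chosen at the top of txUpdateMap()
 * steers txFreeMap() below: COMMIT_PWMAP frees in both the persistent
 * and working maps, while COMMIT_PMAP frees in the persistent map only
 * (the working map is updated later, at last release of the object).
 * Directories are always freed with COMMIT_PWMAP, per the S_ISDIR()
 * test above.
 */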
 2368 
 2369 
 2370 /*
 2371  *      txAllocPMap()
 2372  *
 2373  * function: allocate from persistent map;
 2374  *
 2375  * parameter:
 2376  *      ipbmap  -
 2377  *      maplock -
 2378  *              xad list:
 2379  *              pxd:
 2380  *
 2381  *      maptype -
 2382  *              allocate from persistent map;
 2383  *              free from persistent map;
 2384  *              (e.g., tmp file - free from working map at release
 2385  *               of last reference);
 2386  *              free from persistent and working map;
 2387  *
 2388  *      lsn     - log sequence number;
 2389  */
 2390 static void txAllocPMap(struct inode *ip, struct maplock * maplock,
 2391                         struct tblock * tblk)
 2392 {
 2393         struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
 2394         struct xdlistlock *xadlistlock;
 2395         xad_t *xad;
 2396         s64 xaddr;
 2397         int xlen;
 2398         struct pxd_lock *pxdlock;
 2399         struct xdlistlock *pxdlistlock;
 2400         pxd_t *pxd;
 2401         int n;
 2402 
 2403         /*
 2404          * allocate from persistent map;
 2405          */
 2406         if (maplock->flag & mlckALLOCXADLIST) {
 2407                 xadlistlock = (struct xdlistlock *) maplock;
 2408                 xad = xadlistlock->xdlist;
 2409                 for (n = 0; n < xadlistlock->count; n++, xad++) {
 2410                         if (xad->flag & (XAD_NEW | XAD_EXTENDED)) {
 2411                                 xaddr = addressXAD(xad);
 2412                                 xlen = lengthXAD(xad);
 2413                                 dbUpdatePMap(ipbmap, FALSE, xaddr,
 2414                                              (s64) xlen, tblk);
 2415                                 xad->flag &= ~(XAD_NEW | XAD_EXTENDED);
 2416                                 jfs_info("allocPMap: xaddr:0x%lx xlen:%d",
 2417                                          (ulong) xaddr, xlen);
 2418                         }
 2419                 }
 2420         } else if (maplock->flag & mlckALLOCPXD) {
 2421                 pxdlock = (struct pxd_lock *) maplock;
 2422                 xaddr = addressPXD(&pxdlock->pxd);
 2423                 xlen = lengthPXD(&pxdlock->pxd);
 2424                 dbUpdatePMap(ipbmap, FALSE, xaddr, (s64) xlen, tblk);
 2425                 jfs_info("allocPMap: xaddr:0x%lx xlen:%d", (ulong) xaddr, xlen);
 2426         } else {                /* (maplock->flag & mlckALLOCPXDLIST) */
 2427 
 2428                 pxdlistlock = (struct xdlistlock *) maplock;
 2429                 pxd = pxdlistlock->xdlist;
 2430                 for (n = 0; n < pxdlistlock->count; n++, pxd++) {
 2431                         xaddr = addressPXD(pxd);
 2432                         xlen = lengthPXD(pxd);
 2433                         dbUpdatePMap(ipbmap, FALSE, xaddr, (s64) xlen,
 2434                                      tblk);
 2435                         jfs_info("allocPMap: xaddr:0x%lx xlen:%d",
 2436                                  (ulong) xaddr, xlen);
 2437                 }
 2438         }
 2439 }
 2440 
 2441 
 2442 /*
 2443  *      txFreeMap()
 2444  *
 2445  * function:    free from persistent and/or working map;
 2446  *
 2447  * todo: optimization
 2448  */
 2449 void txFreeMap(struct inode *ip,
 2450                struct maplock * maplock, struct tblock * tblk, int maptype)
 2451 {
 2452         struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
 2453         struct xdlistlock *xadlistlock;
 2454         xad_t *xad;
 2455         s64 xaddr;
 2456         int xlen;
 2457         struct pxd_lock *pxdlock;
 2458         struct xdlistlock *pxdlistlock;
 2459         pxd_t *pxd;
 2460         int n;
 2461 
 2462         jfs_info("txFreeMap: tblk:0x%p maplock:0x%p maptype:0x%x",
 2463                  tblk, maplock, maptype);
 2464 
 2465         /*
 2466          * free from persistent map;
 2467          */
 2468         if (maptype == COMMIT_PMAP || maptype == COMMIT_PWMAP) {
 2469                 if (maplock->flag & mlckFREEXADLIST) {
 2470                         xadlistlock = (struct xdlistlock *) maplock;
 2471                         xad = xadlistlock->xdlist;
 2472                         for (n = 0; n < xadlistlock->count; n++, xad++) {
 2473                                 if (!(xad->flag & XAD_NEW)) {
 2474                                         xaddr = addressXAD(xad);
 2475                                         xlen = lengthXAD(xad);
 2476                                         dbUpdatePMap(ipbmap, TRUE, xaddr,
 2477                                                      (s64) xlen, tblk);
 2478                                         jfs_info("freePMap: xaddr:0x%lx "
 2479                                                  "xlen:%d",
 2480                                                  (ulong) xaddr, xlen);
 2481                                 }
 2482                         }
 2483                 } else if (maplock->flag & mlckFREEPXD) {
 2484                         pxdlock = (struct pxd_lock *) maplock;
 2485                         xaddr = addressPXD(&pxdlock->pxd);
 2486                         xlen = lengthPXD(&pxdlock->pxd);
 2487                         dbUpdatePMap(ipbmap, TRUE, xaddr, (s64) xlen,
 2488                                      tblk);
 2489                         jfs_info("freePMap: xaddr:0x%lx xlen:%d",
 2490                                  (ulong) xaddr, xlen);
 2491                 } else {        /* (maplock->flag & mlckFREEPXDLIST) */
 2492 
 2493                         pxdlistlock = (struct xdlistlock *) maplock;
 2494                         pxd = pxdlistlock->xdlist;
 2495                         for (n = 0; n < pxdlistlock->count; n++, pxd++) {
 2496                                 xaddr = addressPXD(pxd);
 2497                                 xlen = lengthPXD(pxd);
 2498                                 dbUpdatePMap(ipbmap, TRUE, xaddr,
 2499                                              (s64) xlen, tblk);
 2500                                 jfs_info("freePMap: xaddr:0x%lx xlen:%d",
 2501                                          (ulong) xaddr, xlen);
 2502                         }
 2503                 }
 2504         }
 2505 
 2506         /*
 2507          * free from working map;
 2508          */
 2509         if (maptype == COMMIT_PWMAP || maptype == COMMIT_WMAP) {
 2510                 if (maplock->flag & mlckFREEXADLIST) {
 2511                         xadlistlock = (struct xdlistlock *) maplock;
 2512                         xad = xadlistlock->xdlist;
 2513                         for (n = 0; n < xadlistlock->count; n++, xad++) {
 2514                                 xaddr = addressXAD(xad);
 2515                                 xlen = lengthXAD(xad);
 2516                                 dbFree(ip, xaddr, (s64) xlen);
 2517                                 xad->flag = 0;
 2518                                 jfs_info("freeWMap: xaddr:0x%lx xlen:%d",
 2519                                          (ulong) xaddr, xlen);
 2520                         }
 2521                 } else if (maplock->flag & mlckFREEPXD) {
 2522                         pxdlock = (struct pxd_lock *) maplock;
 2523                         xaddr = addressPXD(&pxdlock->pxd);
 2524                         xlen = lengthPXD(&pxdlock->pxd);
 2525                         dbFree(ip, xaddr, (s64) xlen);
 2526                         jfs_info("freeWMap: xaddr:0x%lx xlen:%d",
 2527                                  (ulong) xaddr, xlen);
 2528                 } else {        /* (maplock->flag & mlckFREEPXDLIST) */
 2529 
 2530                         pxdlistlock = (struct xdlistlock *) maplock;
 2531                         pxd = pxdlistlock->xdlist;
 2532                         for (n = 0; n < pxdlistlock->count; n++, pxd++) {
 2533                                 xaddr = addressPXD(pxd);
 2534                                 xlen = lengthPXD(pxd);
 2535                                 dbFree(ip, xaddr, (s64) xlen);
 2536                                 jfs_info("freeWMap: xaddr:0x%lx xlen:%d",
 2537                                          (ulong) xaddr, xlen);
 2538                         }
 2539                 }
 2540         }
 2541 }
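
/*
 * N.B. editor's note: a typical caller is txUpdateMap() above; in
 * condensed (hypothetical) form its dispatch is
 *
 *      txFreeMap(ipimap, maplock, tblk,
 *                S_ISDIR(tlck->ip->i_mode) ? COMMIT_PWMAP : maptype);
 *
 * so COMMIT_PWMAP runs both halves of txFreeMap(), while COMMIT_PMAP
 * or COMMIT_WMAP runs exactly one.
 */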
 2542 
 2543 
 2544 /*
 2545  *      txFreelock()
 2546  *
 2547  * function:    remove tlock from inode anonymous locklist
 2548  */
 2549 void txFreelock(struct inode *ip)
 2550 {
 2551         struct jfs_inode_info *jfs_ip = JFS_IP(ip);
 2552         struct tlock *xtlck, *tlck;
 2553         lid_t xlid = 0, lid;
 2554 
 2555         if (!jfs_ip->atlhead)
 2556                 return;
 2557 
 2558         xtlck = (struct tlock *) &jfs_ip->atlhead;
 2559 
 2560         while ((lid = xtlck->next)) {
 2561                 tlck = lid_to_tlock(lid);
 2562                 if (tlck->flag & tlckFREELOCK) {
 2563                         xtlck->next = tlck->next;
 2564                         txLockFree(lid);
 2565                 } else {
 2566                         xtlck = tlck;
 2567                         xlid = lid;
 2568                 }
 2569         }
 2570 
 2571         if (jfs_ip->atlhead)
 2572                 jfs_ip->atltail = xlid;
 2573         else {
 2574                 jfs_ip->atltail = 0;
 2575                 /*
 2576                  * If inode was on anon_list, remove it
 2577                  */
 2578                 TXN_LOCK();
 2579                 list_del_init(&jfs_ip->anon_inode_list);
 2580                 TXN_UNLOCK();
 2581         }
 2582 }
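
/*
 * N.B. editor's note: casting &jfs_ip->atlhead to a struct tlock *
 * works because next is the first field of struct tlock, so the list
 * head can stand in as a degenerate node while unlinking.  The same
 * pruning idiom with a pointer-to-pointer (hypothetical types, for
 * illustration only):
 *
 *      struct node { struct node *next; int free; };
 *
 *      void prune(struct node **pp)
 *      {
 *              while (*pp) {
 *                      if ((*pp)->free)
 *                              *pp = (*pp)->next;      unlink node
 *                      else
 *                              pp = &(*pp)->next;      advance
 *              }
 *      }
 */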
 2583 
 2584 
 2585 /*
 2586  *      txAbort()
 2587  *
 2588  * function: abort tx before commit;
 2589  *
 2590  * frees line-locks and segment locks for all
 2591  * segments in comdata structure.
 2592  * Optionally sets state of file-system to FM_DIRTY in super-block.
 2593  * log ages of in-memory page-frames held by the caller
 2594  * are reset to 0 (to avoid logwrap).
 2595  */
 2596 void txAbort(tid_t tid, int dirty)
 2597 {
 2598         lid_t lid, next;
 2599         struct metapage *mp;
 2600         struct tblock *tblk = tid_to_tblock(tid);
 2601 
 2602         jfs_warn("txAbort: tid:%d dirty:0x%x", tid, dirty);
 2603 
 2604         /*
 2605          * free tlocks of the transaction
 2606          */
 2607         for (lid = tblk->next; lid; lid = next) {
 2608                 next = lid_to_tlock(lid)->next;
 2609 
 2610                 mp = lid_to_tlock(lid)->mp;
 2611 
 2612                 if (mp) {
 2613                         mp->lid = 0;
 2614 
 2615                         /*
 2616                          * reset lsn of page to avoid logwrap:
 2617                          *
 2618                          * (page may have been previously committed by other
 2619                          * transaction(s) but has not been paged, i.e.,
 2620                          * it may be on logsync list even though it has not
 2621                          * been logged for the current tx.)
 2622                          */
 2623                         if (mp->xflag & COMMIT_PAGE && mp->lsn)
 2624                                 LogSyncRelease(mp);
 2625                 }
 2626                 /* insert tlock at head of freelist */
 2627                 TXN_LOCK();
 2628                 txLockFree(lid);
 2629                 TXN_UNLOCK();
 2630         }
 2631 
 2632         /* caller will free the transaction block */
 2633 
 2634         tblk->next = tblk->last = 0;
 2635 
 2636         /*
 2637          * mark filesystem dirty
 2638          */
 2639         if (dirty)
 2640                 updateSuper(tblk->sb, FM_DIRTY);
 2641 
 2642         return;
 2643 }
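
/*
 * N.B. editor's note: LogSyncRelease() above drops the aborted page
 * from the logsync list and clears its lsn; a stale lsn would otherwise
 * keep pinning the log tail and could eventually wrap the log.
 */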
 2644 
 2645 
 2646 /*
 2647  *      txAbortCommit()
 2648  *
 2649  * function: abort commit.
 2650  *
 2651  * frees tlocks of transaction; line-locks and segment locks for all
 2652  * segments in comdata structure.  frees malloc storage;
 2653  * sets state of file-system to FM_DIRTY in super-block.
 2654  * log ages of in-memory page-frames held by the caller
 2655  * are reset to 0 (to avoid logwrap).
 2656  */
 2657 void txAbortCommit(struct commit * cd, int exval)
 2658 {
 2659         struct tblock *tblk;
 2660         tid_t tid;
 2661         lid_t lid, next;
 2662         struct metapage *mp;
 2663 
 2664         assert(exval == EIO || exval == ENOMEM);
 2665         jfs_warn("txAbortCommit: cd:0x%p", cd);
 2666 
 2667         /*
 2668          * free tlocks of the transaction
 2669          */
 2670         tid = cd->tid;
 2671         tblk = tid_to_tblock(tid);
 2672         for (lid = tblk->next; lid; lid = next) {
 2673                 next = lid_to_tlock(lid)->next;
 2674 
 2675                 mp = lid_to_tlock(lid)->mp;
 2676                 if (mp) {
 2677                         mp->lid = 0;
 2678 
 2679                         /*
 2680                          * reset lsn of page to avoid logwrap;
 2681                          */
 2682                         if (mp->xflag & COMMIT_PAGE)
 2683                                 LogSyncRelease(mp);
 2684                 }
 2685 
 2686                 /* insert tlock at head of freelist */
 2687                 TXN_LOCK();
 2688                 txLockFree(lid);
 2689                 TXN_UNLOCK();
 2690         }
 2691 
 2692         tblk->next = tblk->last = 0;
 2693 
 2694         /* free the transaction block */
 2695         txEnd(tid);
 2696 
 2697         /*
 2698          * mark filesystem dirty
 2699          */
 2700         updateSuper(cd->sb, FM_DIRTY);
 2701 }
 2702 
 2703 
 2704 /*
 2705  *      txLazyCommit(void)
 2706  *
 2707  *      All transactions except those changing ipimap (COMMIT_FORCE) are
 2708  *      processed by this routine.  This ensures that the inode and block
 2709  *      allocation maps are updated in order.  For synchronous transactions,
 2710  *      let the user thread finish processing after txUpdateMap() is called.
 2711  */
 2712 void txLazyCommit(struct tblock * tblk)
 2713 {
 2714         struct jfs_log *log;
 2715 
 2716         while (((tblk->flag & tblkGC_READY) == 0) &&
 2717                ((tblk->flag & tblkGC_UNLOCKED) == 0)) {
 2718                 /* We must have gotten ahead of the user thread
 2719                  */
 2720                 jfs_info("txLazyCommit: tblk 0x%p not unlocked", tblk);
 2721                 schedule();
 2722         }
 2723 
 2724         jfs_info("txLazyCommit: processing tblk 0x%p", tblk);
 2725 
 2726         txUpdateMap(tblk);
 2727 
 2728         log = (struct jfs_log *) JFS_SBI(tblk->sb)->log;
 2729 
 2730         spin_lock_irq(&log->gclock);    // LOGGC_LOCK
 2731 
 2732         tblk->flag |= tblkGC_COMMITTED;
 2733 
 2734         if (tblk->flag & tblkGC_READY)
 2735                 log->gcrtc--;
 2736 
 2737         wake_up_all(&tblk->gcwait);     // LOGGC_WAKEUP
 2738 
 2739         /*
 2740          * Can't release log->gclock until we've tested tblk->flag
 2741          */
 2742         if (tblk->flag & tblkGC_LAZY) {
 2743                 spin_unlock_irq(&log->gclock);  // LOGGC_UNLOCK
 2744                 txUnlock(tblk);
 2745                 tblk->flag &= ~tblkGC_LAZY;
 2746                 txEnd(tblk - TxBlock);  /* Convert back to tid */
 2747         } else
 2748                 spin_unlock_irq(&log->gclock);  // LOGGC_UNLOCK
 2749 
 2750         jfs_info("txLazyCommit: done: tblk = 0x%p", tblk);
 2751 }
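
/*
 * N.B. editor's note: "tblk - TxBlock" above is ordinary pointer
 * arithmetic: TxBlock is the base of the tblock table, so subtracting
 * it recovers the array index, which is the tid.  Sketch:
 *
 *      struct tblock *tblk = tid_to_tblock(tid);      tid -> tblock
 *      tid_t tid2 = tblk - TxBlock;                   tblock -> tid
 *
 * with tid2 == tid.
 */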
 2752 
 2753 /*
 2754  *      jfs_lazycommit(void)
 2755  *
 2756  *      To be run as a kernel daemon.  If lbmIODone is called in an interrupt
 2757  *      context, or where blocking is not wanted, this routine will process
 2758  *      committed transactions from the unlock queue.
 2759  */
 2760 int jfs_lazycommit(void *arg)
 2761 {
 2762         int WorkDone;
 2763         struct tblock *tblk;
 2764         unsigned long flags;
 2765 
 2766         lock_kernel();
 2767 
 2768         daemonize();
 2769         current->tty = NULL;
 2770         strcpy(current->comm, "jfsCommit");
 2771 
 2772         unlock_kernel();
 2773 
 2774         jfsCommitTask = current;
 2775 
 2776         spin_lock_irq(&current->sigmask_lock);
 2777         sigfillset(&current->blocked);
 2778         recalc_sigpending(current);
 2779         spin_unlock_irq(&current->sigmask_lock);
 2780 
 2781         LAZY_LOCK_INIT();
 2782         TxAnchor.unlock_queue = TxAnchor.unlock_tail = 0;
 2783 
 2784         complete(&jfsIOwait);
 2785 
 2786         do {
 2787                 DECLARE_WAITQUEUE(wq, current);
 2788 
 2789                 LAZY_LOCK(flags);
 2790 restart:
 2791                 WorkDone = 0;
 2792                 while ((tblk = TxAnchor.unlock_queue)) {
 2793                         /*
 2794                          * We can't get ahead of user thread.  Spinning is
 2795                          * simpler than blocking/waking.  We shouldn't spin
 2796                          * very long, since user thread shouldn't be blocking
 2797                          * between lmGroupCommit & txEnd.
 2798                          */
 2799                         WorkDone = 1;
 2800 
 2801                         /*
 2802                          * Remove first transaction from queue
 2803                          */
 2804                         TxAnchor.unlock_queue = tblk->cqnext;
 2805                         tblk->cqnext = 0;
 2806                         if (TxAnchor.unlock_tail == tblk)
 2807                                 TxAnchor.unlock_tail = 0;
 2808 
 2809                         LAZY_UNLOCK(flags);
 2810                         txLazyCommit(tblk);
 2811 
 2812                         /*
 2813                          * We can be running indefinitely if other processors
 2814                          * are adding transactions to this list
 2815                          */
 2816                         cond_resched();
 2817                         LAZY_LOCK(flags);
 2818                 }

                if (WorkDone)
                        goto restart;

                add_wait_queue(&jfs_commit_thread_wait, &wq);
                set_current_state(TASK_INTERRUPTIBLE);
                LAZY_UNLOCK(flags);
                schedule();
                current->state = TASK_RUNNING;
                remove_wait_queue(&jfs_commit_thread_wait, &wq);
        } while (!jfs_stop_threads);

        if (TxAnchor.unlock_queue)
                jfs_err("jfs_lazycommit being killed w/pending transactions!");
        else
                jfs_info("jfs_lazycommit being killed");
        complete(&jfsIOwait);
        return 0;
}
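
/*
 * A hedged sketch of the spawning side of the complete(&jfsIOwait)
 * handshake above (the caller, flags, and error handling here are
 * assumptions, not taken from this file): the daemon would be started
 * once at initialization, with the spawner blocking until the thread
 * has finished setting itself up:
 *
 *      int pid = kernel_thread(jfs_lazycommit, NULL, 0);
 *      if (pid < 0)
 *              return pid;
 *      wait_for_completion(&jfsIOwait);        - daemon is now ready
 */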

void txLazyUnlock(struct tblock * tblk)
{
        unsigned long flags;

        LAZY_LOCK(flags);

        if (TxAnchor.unlock_tail)
                TxAnchor.unlock_tail->cqnext = tblk;
        else
                TxAnchor.unlock_queue = tblk;
        TxAnchor.unlock_tail = tblk;
        tblk->cqnext = 0;
        LAZY_UNLOCK(flags);
        wake_up(&jfs_commit_thread_wait);
}
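
/*
 * Taken together, txLazyUnlock and jfs_lazycommit form a simple
 * single-consumer FIFO: txLazyUnlock appends at unlock_tail under
 * LAZY_LOCK and wakes the daemon, while jfs_lazycommit pops from the
 * unlock_queue head and calls txLazyCommit() with the lock dropped.
 * A producer therefore needs nothing more than:
 *
 *      txLazyUnlock(tblk);     - enqueue and wake jfsCommit
 */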

static void LogSyncRelease(struct metapage * mp)
{
        struct jfs_log *log = mp->log;

        assert(atomic_read(&mp->nohomeok));
        assert(log);
        atomic_dec(&mp->nohomeok);

        if (atomic_read(&mp->nohomeok))
                return;

        hold_metapage(mp, 0);

        LOGSYNC_LOCK(log);
        mp->log = NULL;
        mp->lsn = 0;
        mp->clsn = 0;
        log->count--;
        list_del_init(&mp->synclist);
        LOGSYNC_UNLOCK(log);

        release_metapage(mp);
}

/*
 *      txQuiesce
 *
 *      Block all new transactions and push anonymous transactions to
 *      completion.
 *
 *      This does almost the same thing as jfs_sync below.  We don't
 *      worry about deadlocking when TlocksLow is set, since we would
 *      expect jfs_sync to get us out of that jam.
 */
void txQuiesce(struct super_block *sb)
{
        struct inode *ip;
        struct jfs_inode_info *jfs_ip;
        struct jfs_log *log = JFS_SBI(sb)->log;
        tid_t tid;

        set_bit(log_QUIESCE, &log->flag);

        TXN_LOCK();
restart:
        while (!list_empty(&TxAnchor.anon_list)) {
                jfs_ip = list_entry(TxAnchor.anon_list.next,
                                    struct jfs_inode_info,
                                    anon_inode_list);
                ip = jfs_ip->inode;

                /*
                 * inode will be removed from anonymous list
                 * when it is committed
                 */
                TXN_UNLOCK();
                tid = txBegin(ip->i_sb, COMMIT_INODE | COMMIT_FORCE);
                down(&jfs_ip->commit_sem);
                txCommit(tid, 1, &ip, 0);
                txEnd(tid);
                up(&jfs_ip->commit_sem);
                /*
                 * Just to be safe.  I don't know how
                 * long we can run without blocking
                 */
                cond_resched();
                TXN_LOCK();
        }

        /*
         * If jfs_sync is running in parallel, there could be some inodes
         * on anon_list2.  Let's check.
         */
        if (!list_empty(&TxAnchor.anon_list2)) {
                list_splice(&TxAnchor.anon_list2, &TxAnchor.anon_list);
                INIT_LIST_HEAD(&TxAnchor.anon_list2);
                goto restart;
        }
        TXN_UNLOCK();

        /*
         * We may need to kick off the group commit
         */
        jfs_flush_journal(log, 0);
}

/*
 * txResume()
 *
 * Allows transactions to start again following txQuiesce
 */
void txResume(struct super_block *sb)
{
        struct jfs_log *log = JFS_SBI(sb)->log;

        clear_bit(log_QUIESCE, &log->flag);
        TXN_WAKEUP(&log->syncwait);
}
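
/*
 * A usage sketch for the pair above; the caller shown here is
 * hypothetical, since no caller appears in this file.  A
 * filesystem-freeze path would bracket the stable period like so:
 *
 *      txQuiesce(sb);          - block new tids, force out anon txns
 *      ...snapshot or otherwise rely on a quiescent volume...
 *      txResume(sb);           - clear log_QUIESCE, wake waiters
 */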

/*
 *      jfs_sync(void)
 *
 *      To be run as a kernel daemon.  This is awakened when tlocks run low.
 *      We write any inodes that have anonymous tlocks so they will become
 *      available.
 */
int jfs_sync(void *arg)
{
        struct inode *ip;
        struct jfs_inode_info *jfs_ip;
        int rc;
        tid_t tid;

        lock_kernel();

        daemonize();
        current->tty = NULL;
        strcpy(current->comm, "jfsSync");

        unlock_kernel();

        spin_lock_irq(&current->sigmask_lock);
        sigfillset(&current->blocked);
        recalc_sigpending(current);
        spin_unlock_irq(&current->sigmask_lock);

        complete(&jfsIOwait);

        do {
                DECLARE_WAITQUEUE(wq, current);
                /*
                 * write each inode on the anonymous inode list
                 */
                TXN_LOCK();
                while (TxAnchor.TlocksLow && !list_empty(&TxAnchor.anon_list)) {
                        jfs_ip = list_entry(TxAnchor.anon_list.next,
                                            struct jfs_inode_info,
                                            anon_inode_list);
                        ip = jfs_ip->inode;

                        /*
                         * down_trylock returns 0 on success.  This is
                         * inconsistent with spin_trylock.
                         */
                        if (!down_trylock(&jfs_ip->commit_sem)) {
                                /*
                                 * inode will be removed from anonymous list
                                 * when it is committed
                                 */
                                TXN_UNLOCK();
                                tid = txBegin(ip->i_sb, COMMIT_INODE);
                                rc = txCommit(tid, 1, &ip, 0);
                                txEnd(tid);
                                up(&jfs_ip->commit_sem);
                                /*
                                 * Just to be safe.  I don't know how
                                 * long we can run without blocking
                                 */
                                cond_resched();
                                TXN_LOCK();
                        } else {
                                /* We can't get the commit semaphore.  It
                                 * may be held by a thread waiting for
                                 * tlocks, so let's not block here.  Save
                                 * the inode to put back on the anon_list.
                                 */

                                /* Take off anon_list */
                                list_del(&jfs_ip->anon_inode_list);

                                /* Put on anon_list2 */
                                list_add(&jfs_ip->anon_inode_list,
                                         &TxAnchor.anon_list2);
                        }
                }
                /* Add anon_list2 back to anon_list */
                if (!list_empty(&TxAnchor.anon_list2)) {
                        list_splice(&TxAnchor.anon_list2, &TxAnchor.anon_list);
                        INIT_LIST_HEAD(&TxAnchor.anon_list2);
                }
                add_wait_queue(&jfs_sync_thread_wait, &wq);
                set_current_state(TASK_INTERRUPTIBLE);
                TXN_UNLOCK();
                schedule();
                current->state = TASK_RUNNING;
                remove_wait_queue(&jfs_sync_thread_wait, &wq);
        } while (!jfs_stop_threads);

        jfs_info("jfs_sync being killed");
        complete(&jfsIOwait);
        return 0;
}
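
/*
 * A hedged sketch of the waker's side, which is not shown in this
 * file: per the comment above, the tlock allocator would flag the
 * shortage and nudge this daemon with something along the lines of
 *
 *      TxAnchor.TlocksLow = 1;
 *      wake_up(&jfs_sync_thread_wait);
 *
 * after which jfs_sync drains the anonymous inode list until enough
 * tlocks are free again.
 */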

#if defined(CONFIG_PROC_FS) && defined(CONFIG_JFS_DEBUG)
int jfs_txanchor_read(char *buffer, char **start, off_t offset, int length,
                      int *eof, void *data)
{
        int len = 0;
        off_t begin;
        char *freewait;
        char *freelockwait;
        char *lowlockwait;

        freewait =
            waitqueue_active(&TxAnchor.freewait) ? "active" : "empty";
        freelockwait =
            waitqueue_active(&TxAnchor.freelockwait) ? "active" : "empty";
        lowlockwait =
            waitqueue_active(&TxAnchor.lowlockwait) ? "active" : "empty";

        len += sprintf(buffer,
                       "JFS TxAnchor\n"
                       "============\n"
                       "freetid = %d\n"
                       "freewait = %s\n"
                       "freelock = %d\n"
                       "freelockwait = %s\n"
                       "lowlockwait = %s\n"
                       "tlocksInUse = %d\n"
                       "TlocksLow = %d\n"
                       "unlock_queue = 0x%p\n"
                       "unlock_tail = 0x%p\n",
                       TxAnchor.freetid,
                       freewait,
                       TxAnchor.freelock,
                       freelockwait,
                       lowlockwait,
                       TxAnchor.tlocksInUse,
                       TxAnchor.TlocksLow,
                       TxAnchor.unlock_queue,
                       TxAnchor.unlock_tail);

        begin = offset;
        *start = buffer + begin;
        len -= begin;

        if (len > length)
                len = length;
        else
                *eof = 1;

        if (len < 0)
                len = 0;

        return len;
}
#endif
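
/*
 * Assuming the usual proc registration elsewhere in JFS (the exact
 * path is an assumption, not visible in this file), the handler above
 * backs a read-only proc entry, so the anchor state can be inspected
 * from userspace with something like:
 *
 *      $ cat /proc/fs/jfs/TxAnchor
 */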

#if defined(CONFIG_PROC_FS) && defined(CONFIG_JFS_STATISTICS)
int jfs_txstats_read(char *buffer, char **start, off_t offset, int length,
                     int *eof, void *data)
{
        int len = 0;
        off_t begin;

        len += sprintf(buffer,
                       "JFS TxStats\n"
                       "===========\n"
                       "calls to txBegin = %d\n"
                       "txBegin blocked by sync barrier = %d\n"
                       "txBegin blocked by tlocks low = %d\n"
                       "txBegin blocked by no free tid = %d\n"
                       "calls to txBeginAnon = %d\n"
                       "txBeginAnon blocked by sync barrier = %d\n"
                       "txBeginAnon blocked by tlocks low = %d\n"
                       "calls to txLockAlloc = %d\n"
                       "txLockAlloc blocked by no free lock = %d\n",
                       TxStat.txBegin,
                       TxStat.txBegin_barrier,
                       TxStat.txBegin_lockslow,
                       TxStat.txBegin_freetid,
                       TxStat.txBeginAnon,
                       TxStat.txBeginAnon_barrier,
                       TxStat.txBeginAnon_lockslow,
                       TxStat.txLockAlloc,
                       TxStat.txLockAlloc_freelock);

        begin = offset;
        *start = buffer + begin;
        len -= begin;

        if (len > length)
                len = length;
        else
                *eof = 1;

        if (len < 0)
                len = 0;

        return len;
}
#endif
