Showing error 1868

User: Jiri Slaby
Error type: Invalid Pointer Dereference
Error type description: An invalid pointer is dereferenced
File location: fs/ext3/inode.c
Line in file: 865
Project: Linux Kernel
Project version: 2.6.28
Tools: Smatch (1.59)
Entered: 2013-09-11 08:47:26 UTC
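
Notes: line 865 of the listing below is the brelse(partial->bh) call inside
ext3_get_blocks_handle().  partial comes from ext3_get_branch(), which can
return NULL, and on the err == -EAGAIN path a NULL partial does reach the
release loop.  At run time the loop guard (partial > chain) keeps the body
from executing, but relationally comparing a possibly-NULL pointer against
the chain[] array is exactly the kind of thing a checker refuses to trust,
so Smatch appears to flag the loop body as a possible invalid dereference.
A condensed sketch of the suspect path (a hedged reconstruction, not quoted
from the tool output):

    partial = ext3_get_branch(inode, depth, offsets, chain, &err);
    if (!partial) {                 /* whole chain found, err == 0        */
            ...
            err = -EAGAIN;          /* chain changed while mapping more   */
            ...                     /* blocks: we skip the goto got_it    */
    }
    ...
    if (err == -EAGAIN || !verify_chain(chain, partial)) {
            while (partial > chain) {       /* false when partial == NULL */
                    brelse(partial->bh);    /* line 865: flagged deref    */
                    partial--;
            }
    }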


Source:

   1/*
   2 *  linux/fs/ext3/inode.c
   3 *
   4 * Copyright (C) 1992, 1993, 1994, 1995
   5 * Remy Card (card@masi.ibp.fr)
   6 * Laboratoire MASI - Institut Blaise Pascal
   7 * Universite Pierre et Marie Curie (Paris VI)
   8 *
   9 *  from
  10 *
  11 *  linux/fs/minix/inode.c
  12 *
  13 *  Copyright (C) 1991, 1992  Linus Torvalds
  14 *
  15 *  Goal-directed block allocation by Stephen Tweedie
  16 *        (sct@redhat.com), 1993, 1998
  17 *  Big-endian to little-endian byte-swapping/bitmaps by
  18 *        David S. Miller (davem@caip.rutgers.edu), 1995
  19 *  64-bit file support on 64-bit platforms by Jakub Jelinek
  20 *        (jj@sunsite.ms.mff.cuni.cz)
  21 *
  22 *  Assorted race fixes, rewrite of ext3_get_block() by Al Viro, 2000
  23 */
  24
  25#include <linux/module.h>
  26#include <linux/fs.h>
  27#include <linux/time.h>
  28#include <linux/ext3_jbd.h>
  29#include <linux/jbd.h>
  30#include <linux/highuid.h>
  31#include <linux/pagemap.h>
  32#include <linux/quotaops.h>
  33#include <linux/string.h>
  34#include <linux/buffer_head.h>
  35#include <linux/writeback.h>
  36#include <linux/mpage.h>
  37#include <linux/uio.h>
  38#include <linux/bio.h>
  39#include <linux/fiemap.h>
  40#include "xattr.h"
  41#include "acl.h"
  42
  43static int ext3_writepage_trans_blocks(struct inode *inode);
  44
  45/*
  46 * Test whether an inode is a fast symlink.
  47 */
  48static int ext3_inode_is_fast_symlink(struct inode *inode)
  49{
  50        int ea_blocks = EXT3_I(inode)->i_file_acl ?
  51                (inode->i_sb->s_blocksize >> 9) : 0;
  52
  53        return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
  54}
  55
  56/*
  57 * The ext3 forget function must perform a revoke if we are freeing data
   58 * which has been journaled.  Metadata (e.g. indirect blocks) must be
  59 * revoked in all cases.
  60 *
  61 * "bh" may be NULL: a metadata block may have been freed from memory
  62 * but there may still be a record of it in the journal, and that record
  63 * still needs to be revoked.
  64 */
  65int ext3_forget(handle_t *handle, int is_metadata, struct inode *inode,
  66                        struct buffer_head *bh, ext3_fsblk_t blocknr)
  67{
  68        int err;
  69
  70        might_sleep();
  71
  72        BUFFER_TRACE(bh, "enter");
  73
  74        jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
  75                  "data mode %lx\n",
  76                  bh, is_metadata, inode->i_mode,
  77                  test_opt(inode->i_sb, DATA_FLAGS));
  78
  79        /* Never use the revoke function if we are doing full data
  80         * journaling: there is no need to, and a V1 superblock won't
  81         * support it.  Otherwise, only skip the revoke on un-journaled
  82         * data blocks. */
  83
  84        if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA ||
  85            (!is_metadata && !ext3_should_journal_data(inode))) {
  86                if (bh) {
  87                        BUFFER_TRACE(bh, "call journal_forget");
  88                        return ext3_journal_forget(handle, bh);
  89                }
  90                return 0;
  91        }
  92
  93        /*
  94         * data!=journal && (is_metadata || should_journal_data(inode))
  95         */
  96        BUFFER_TRACE(bh, "call ext3_journal_revoke");
  97        err = ext3_journal_revoke(handle, blocknr, bh);
  98        if (err)
  99                ext3_abort(inode->i_sb, __func__,
 100                           "error %d when attempting revoke", err);
 101        BUFFER_TRACE(bh, "exit");
 102        return err;
 103}
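
The forget/revoke decision above boils down to a small table (a restatement
of the code and its comment, nothing new):

    /*
     * data=journal mount                  -> journal_forget (never revoke)
     * data block, inode not journaling    -> journal_forget
     * metadata, or journaled data block   -> journal_revoke
     */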
 104
 105/*
 106 * Work out how many blocks we need to proceed with the next chunk of a
 107 * truncate transaction.
 108 */
 109static unsigned long blocks_for_truncate(struct inode *inode)
 110{
 111        unsigned long needed;
 112
 113        needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);
 114
 115        /* Give ourselves just enough room to cope with inodes in which
 116         * i_blocks is corrupt: we've seen disk corruptions in the past
 117         * which resulted in random data in an inode which looked enough
 118         * like a regular file for ext3 to try to delete it.  Things
 119         * will go a bit crazy if that happens, but at least we should
 120         * try not to panic the whole kernel. */
 121        if (needed < 2)
 122                needed = 2;
 123
 124        /* But we need to bound the transaction so we don't overflow the
 125         * journal. */
 126        if (needed > EXT3_MAX_TRANS_DATA)
 127                needed = EXT3_MAX_TRANS_DATA;
 128
 129        return EXT3_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
 130}
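
A worked example, with hypothetical figures (assuming a 4KB block size,
i.e. s_blocksize_bits == 12, and a result below EXT3_MAX_TRANS_DATA):

    /*
     * i_blocks = 256 512-byte sectors (a 128KB file)
     * needed   = 256 >> (12 - 9) = 32 filesystem blocks
     * estimate = EXT3_DATA_TRANS_BLOCKS(sb) + 32 journal credits
     */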
 131
 132/*
 133 * Truncate transactions can be complex and absolutely huge.  So we need to
  134 * be able to restart the transaction at a convenient checkpoint to make
 135 * sure we don't overflow the journal.
 136 *
 137 * start_transaction gets us a new handle for a truncate transaction,
 138 * and extend_transaction tries to extend the existing one a bit.  If
 139 * extend fails, we need to propagate the failure up and restart the
 140 * transaction in the top-level truncate loop. --sct
 141 */
 142static handle_t *start_transaction(struct inode *inode)
 143{
 144        handle_t *result;
 145
 146        result = ext3_journal_start(inode, blocks_for_truncate(inode));
 147        if (!IS_ERR(result))
 148                return result;
 149
 150        ext3_std_error(inode->i_sb, PTR_ERR(result));
 151        return result;
 152}
 153
 154/*
 155 * Try to extend this transaction for the purposes of truncation.
 156 *
 157 * Returns 0 if we managed to create more room.  If we can't create more
  158 * room and the transaction must be restarted, we return 1.
 159 */
 160static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
 161{
 162        if (handle->h_buffer_credits > EXT3_RESERVE_TRANS_BLOCKS)
 163                return 0;
 164        if (!ext3_journal_extend(handle, blocks_for_truncate(inode)))
 165                return 0;
 166        return 1;
 167}
 168
 169/*
 170 * Restart the transaction associated with *handle.  This does a commit,
 171 * so before we call here everything must be consistently dirtied against
 172 * this transaction.
 173 */
 174static int ext3_journal_test_restart(handle_t *handle, struct inode *inode)
 175{
 176        jbd_debug(2, "restarting handle %p\n", handle);
 177        return ext3_journal_restart(handle, blocks_for_truncate(inode));
 178}
 179
 180/*
 181 * Called at the last iput() if i_nlink is zero.
 182 */
 183void ext3_delete_inode (struct inode * inode)
 184{
 185        handle_t *handle;
 186
 187        truncate_inode_pages(&inode->i_data, 0);
 188
 189        if (is_bad_inode(inode))
 190                goto no_delete;
 191
 192        handle = start_transaction(inode);
 193        if (IS_ERR(handle)) {
 194                /*
 195                 * If we're going to skip the normal cleanup, we still need to
 196                 * make sure that the in-core orphan linked list is properly
 197                 * cleaned up.
 198                 */
 199                ext3_orphan_del(NULL, inode);
 200                goto no_delete;
 201        }
 202
 203        if (IS_SYNC(inode))
 204                handle->h_sync = 1;
 205        inode->i_size = 0;
 206        if (inode->i_blocks)
 207                ext3_truncate(inode);
 208        /*
 209         * Kill off the orphan record which ext3_truncate created.
 210         * AKPM: I think this can be inside the above `if'.
 211         * Note that ext3_orphan_del() has to be able to cope with the
 212         * deletion of a non-existent orphan - this is because we don't
 213         * know if ext3_truncate() actually created an orphan record.
 214         * (Well, we could do this if we need to, but heck - it works)
 215         */
 216        ext3_orphan_del(handle, inode);
 217        EXT3_I(inode)->i_dtime        = get_seconds();
 218
 219        /*
 220         * One subtle ordering requirement: if anything has gone wrong
 221         * (transaction abort, IO errors, whatever), then we can still
 222         * do these next steps (the fs will already have been marked as
 223         * having errors), but we can't free the inode if the mark_dirty
 224         * fails.
 225         */
 226        if (ext3_mark_inode_dirty(handle, inode))
 227                /* If that failed, just do the required in-core inode clear. */
 228                clear_inode(inode);
 229        else
 230                ext3_free_inode(handle, inode);
 231        ext3_journal_stop(handle);
 232        return;
 233no_delete:
 234        clear_inode(inode);        /* We must guarantee clearing of inode... */
 235}
 236
 237typedef struct {
 238        __le32        *p;
 239        __le32        key;
 240        struct buffer_head *bh;
 241} Indirect;
 242
 243static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
 244{
 245        p->key = *(p->p = v);
 246        p->bh = bh;
 247}
 248
 249static int verify_chain(Indirect *from, Indirect *to)
 250{
 251        while (from <= to && from->key == *from->p)
 252                from++;
 253        return (from > to);
 254}
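
Each triple records where a block number was read from (p), the value seen
there (key) and the buffer hosting it (bh); verify_chain() re-reads each *p
and compares it against the cached key, so a concurrent truncate that zeroes
a slot makes the cached walk detectably stale.  A minimal sketch with
hypothetical values (not kernel code):

    Indirect c;
    __le32 slot = cpu_to_le32(1234);

    add_chain(&c, NULL, &slot);     /* c.key caches 1234                 */
    slot = 0;                       /* concurrent truncate zeroes slot   */
    verify_chain(&c, &c);           /* returns 0: *c.p != c.key          */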
 255
 256/**
 257 *        ext3_block_to_path - parse the block number into array of offsets
 258 *        @inode: inode in question (we are only interested in its superblock)
 259 *        @i_block: block number to be parsed
 260 *        @offsets: array to store the offsets in
 261 *      @boundary: set this non-zero if the referred-to block is likely to be
 262 *             followed (on disk) by an indirect block.
 263 *
  264 *        To store the locations of a file's data, ext3 uses a data structure common
 265 *        for UNIX filesystems - tree of pointers anchored in the inode, with
 266 *        data blocks at leaves and indirect blocks in intermediate nodes.
 267 *        This function translates the block number into path in that tree -
 268 *        return value is the path length and @offsets[n] is the offset of
  269 *        pointer to the (n+1)th node in the nth one. If @block is out of range
  270 *        (negative or too large), a warning is printed and zero is returned.
 271 *
 272 *        Note: function doesn't find node addresses, so no IO is needed. All
 273 *        we need to know is the capacity of indirect blocks (taken from the
 274 *        inode->i_sb).
 275 */
 276
 277/*
 278 * Portability note: the last comparison (check that we fit into triple
 279 * indirect block) is spelled differently, because otherwise on an
 280 * architecture with 32-bit longs and 8Kb pages we might get into trouble
 281 * if our filesystem had 8Kb blocks. We might use long long, but that would
 282 * kill us on x86. Oh, well, at least the sign propagation does not matter -
 283 * i_block would have to be negative in the very beginning, so we would not
 284 * get there at all.
 285 */
 286
 287static int ext3_block_to_path(struct inode *inode,
 288                        long i_block, int offsets[4], int *boundary)
 289{
 290        int ptrs = EXT3_ADDR_PER_BLOCK(inode->i_sb);
 291        int ptrs_bits = EXT3_ADDR_PER_BLOCK_BITS(inode->i_sb);
 292        const long direct_blocks = EXT3_NDIR_BLOCKS,
 293                indirect_blocks = ptrs,
 294                double_blocks = (1 << (ptrs_bits * 2));
 295        int n = 0;
 296        int final = 0;
 297
 298        if (i_block < 0) {
 299                ext3_warning (inode->i_sb, "ext3_block_to_path", "block < 0");
 300        } else if (i_block < direct_blocks) {
 301                offsets[n++] = i_block;
 302                final = direct_blocks;
 303        } else if ( (i_block -= direct_blocks) < indirect_blocks) {
 304                offsets[n++] = EXT3_IND_BLOCK;
 305                offsets[n++] = i_block;
 306                final = ptrs;
 307        } else if ((i_block -= indirect_blocks) < double_blocks) {
 308                offsets[n++] = EXT3_DIND_BLOCK;
 309                offsets[n++] = i_block >> ptrs_bits;
 310                offsets[n++] = i_block & (ptrs - 1);
 311                final = ptrs;
 312        } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
 313                offsets[n++] = EXT3_TIND_BLOCK;
 314                offsets[n++] = i_block >> (ptrs_bits * 2);
 315                offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
 316                offsets[n++] = i_block & (ptrs - 1);
 317                final = ptrs;
 318        } else {
 319                ext3_warning(inode->i_sb, "ext3_block_to_path", "block > big");
 320        }
 321        if (boundary)
 322                *boundary = final - 1 - (i_block & (ptrs - 1));
 323        return n;
 324}
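
For instance, with 1KB blocks (ptrs == 256, ptrs_bits == 8) and the usual
twelve direct slots, the translation works out as follows (hypothetical
i_block values):

    /*
     * i_block = 5   -> offsets = {5}          depth 1 (direct slot 5)
     * i_block = 12  -> offsets = {12, 0}      depth 2 (EXT3_IND_BLOCK,
     *                  then 12 - 12 = 0 inside the indirect block)
     * i_block = 300 -> offsets = {13, 0, 32}  depth 3 (EXT3_DIND_BLOCK;
     *                  300 - 12 - 256 = 32 -> 32 >> 8 = 0, 32 & 255 = 32)
     */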
 325
 326/**
 327 *        ext3_get_branch - read the chain of indirect blocks leading to data
 328 *        @inode: inode in question
 329 *        @depth: depth of the chain (1 - direct pointer, etc.)
 330 *        @offsets: offsets of pointers in inode/indirect blocks
 331 *        @chain: place to store the result
 332 *        @err: here we store the error value
 333 *
 334 *        Function fills the array of triples <key, p, bh> and returns %NULL
 335 *        if everything went OK or the pointer to the last filled triple
 336 *        (incomplete one) otherwise. Upon the return chain[i].key contains
 337 *        the number of (i+1)-th block in the chain (as it is stored in memory,
 338 *        i.e. little-endian 32-bit), chain[i].p contains the address of that
 339 *        number (it points into struct inode for i==0 and into the bh->b_data
 340 *        for i>0) and chain[i].bh points to the buffer_head of i-th indirect
 341 *        block for i>0 and NULL for i==0. In other words, it holds the block
 342 *        numbers of the chain, addresses they were taken from (and where we can
 343 *        verify that chain did not change) and buffer_heads hosting these
 344 *        numbers.
 345 *
  346 *        Function stops when it stumbles upon a zero pointer (absent block)
 347 *                (pointer to last triple returned, *@err == 0)
 348 *        or when it gets an IO error reading an indirect block
 349 *                (ditto, *@err == -EIO)
 350 *        or when it notices that chain had been changed while it was reading
 351 *                (ditto, *@err == -EAGAIN)
 352 *        or when it reads all @depth-1 indirect blocks successfully and finds
  353 * the whole chain, all the way to the data (returns %NULL, *err == 0).
 354 */
 355static Indirect *ext3_get_branch(struct inode *inode, int depth, int *offsets,
 356                                 Indirect chain[4], int *err)
 357{
 358        struct super_block *sb = inode->i_sb;
 359        Indirect *p = chain;
 360        struct buffer_head *bh;
 361
 362        *err = 0;
 363        /* i_data is not going away, no lock needed */
 364        add_chain (chain, NULL, EXT3_I(inode)->i_data + *offsets);
 365        if (!p->key)
 366                goto no_block;
 367        while (--depth) {
 368                bh = sb_bread(sb, le32_to_cpu(p->key));
 369                if (!bh)
 370                        goto failure;
 371                /* Reader: pointers */
 372                if (!verify_chain(chain, p))
 373                        goto changed;
 374                add_chain(++p, bh, (__le32*)bh->b_data + *++offsets);
 375                /* Reader: end */
 376                if (!p->key)
 377                        goto no_block;
 378        }
 379        return NULL;
 380
 381changed:
 382        brelse(bh);
 383        *err = -EAGAIN;
 384        goto no_block;
 385failure:
 386        *err = -EIO;
 387no_block:
 388        return p;
 389}
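
A non-NULL return hands the caller a buffer_head reference for every filled
triple except chain[0] (whose bh is NULL because that pointer lives in the
inode itself), which is why every unwind path in this file releases with the
same idiom:

    while (partial > chain) {
            brelse(partial->bh);
            partial--;
    }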
 390
 391/**
 392 *        ext3_find_near - find a place for allocation with sufficient locality
 393 *        @inode: owner
 394 *        @ind: descriptor of indirect block.
 395 *
 396 *        This function returns the preferred place for block allocation.
  397 *        It is used when the heuristic for sequential allocation fails.
 398 *        Rules are:
 399 *          + if there is a block to the left of our position - allocate near it.
 400 *          + if pointer will live in indirect block - allocate near that block.
 401 *          + if pointer will live in inode - allocate in the same
 402 *            cylinder group.
 403 *
  404 * In the latter case we colour the starting block by the caller's PID to
 405 * prevent it from clashing with concurrent allocations for a different inode
 406 * in the same block group.   The PID is used here so that functionally related
 407 * files will be close-by on-disk.
 408 *
 409 *        Caller must make sure that @ind is valid and will stay that way.
 410 */
 411static ext3_fsblk_t ext3_find_near(struct inode *inode, Indirect *ind)
 412{
 413        struct ext3_inode_info *ei = EXT3_I(inode);
 414        __le32 *start = ind->bh ? (__le32*) ind->bh->b_data : ei->i_data;
 415        __le32 *p;
 416        ext3_fsblk_t bg_start;
 417        ext3_grpblk_t colour;
 418
 419        /* Try to find previous block */
 420        for (p = ind->p - 1; p >= start; p--) {
 421                if (*p)
 422                        return le32_to_cpu(*p);
 423        }
 424
 425        /* No such thing, so let's try location of indirect block */
 426        if (ind->bh)
 427                return ind->bh->b_blocknr;
 428
 429        /*
  430         * Is it going to be referred to from the inode itself? OK, just put it
 431         * into the same cylinder group then.
 432         */
 433        bg_start = ext3_group_first_block_no(inode->i_sb, ei->i_block_group);
 434        colour = (current->pid % 16) *
 435                        (EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
 436        return bg_start + colour;
 437}
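
The PID colouring in the cylinder-group fallback spreads unrelated writers
across 16 stripes of the group.  For example, assuming (hypothetically)
EXT3_BLOCKS_PER_GROUP(sb) == 32768:

    /*
     * colour = (pid % 16) * (32768 / 16)
     * pid 4217 -> (4217 % 16) * 2048 = 9 * 2048 = 18432
     * goal     = bg_start + 18432
     */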
 438
 439/**
 440 *        ext3_find_goal - find a preferred place for allocation.
 441 *        @inode: owner
 442 *        @block:  block we want
 443 *        @partial: pointer to the last triple within a chain
 444 *
  445 *        Normally this function finds the preferred place for block allocation
  446 *        and returns it.
 447 */
 448
 449static ext3_fsblk_t ext3_find_goal(struct inode *inode, long block,
 450                                   Indirect *partial)
 451{
 452        struct ext3_block_alloc_info *block_i;
 453
 454        block_i =  EXT3_I(inode)->i_block_alloc_info;
 455
 456        /*
 457         * try the heuristic for sequential allocation,
 458         * failing that at least try to get decent locality.
 459         */
 460        if (block_i && (block == block_i->last_alloc_logical_block + 1)
 461                && (block_i->last_alloc_physical_block != 0)) {
 462                return block_i->last_alloc_physical_block + 1;
 463        }
 464
 465        return ext3_find_near(inode, partial);
 466}
 467
 468/**
 469 *        ext3_blks_to_allocate: Look up the block map and count the number
  470 *        of direct blocks that need to be allocated for the given branch.
 471 *
 472 *        @branch: chain of indirect blocks
  473 *        @k: number of blocks needed for indirect blocks
 474 *        @blks: number of data blocks to be mapped.
 475 *        @blocks_to_boundary:  the offset in the indirect block
 476 *
  477 *        return the total number of blocks to be allocated, including the
 478 *        direct and indirect blocks.
 479 */
 480static int ext3_blks_to_allocate(Indirect *branch, int k, unsigned long blks,
 481                int blocks_to_boundary)
 482{
 483        unsigned long count = 0;
 484
 485        /*
  486         * Simple case: the [t,d]indirect block(s) have not been allocated yet,
  487         * so clearly the blocks on that path have not been allocated either
 488         */
 489        if (k > 0) {
 490                /* right now we don't handle cross boundary allocation */
 491                if (blks < blocks_to_boundary + 1)
 492                        count += blks;
 493                else
 494                        count += blocks_to_boundary + 1;
 495                return count;
 496        }
 497
 498        count++;
 499        while (count < blks && count <= blocks_to_boundary &&
 500                le32_to_cpu(*(branch[0].p + count)) == 0) {
 501                count++;
 502        }
 503        return count;
 504}
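
When the indirect path already exists (k == 0), the count grows only while
the on-disk slots are still holes.  A hypothetical map makes the stopping
rule concrete:

    /*
     * slots from branch[0].p: 0, 0, 977, ...   (977 is already mapped)
     * blks = 8, blocks_to_boundary = 5
     * count: 1 -> slot+1 == 0 -> 2 -> slot+2 == 977 -> stop, return 2
     */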
 505
 506/**
 507 *        ext3_alloc_blocks: multiple allocate blocks needed for a branch
  508 *        @indirect_blks: the number of blocks needed to allocate for indirect
 509 *                        blocks
 510 *
 511 *        @new_blocks: on return it will store the new block numbers for
  512 *        the indirect blocks (if needed) and the first direct block,
 513 *        @blks:        on return it will store the total number of allocated
 514 *                direct blocks
 515 */
 516static int ext3_alloc_blocks(handle_t *handle, struct inode *inode,
 517                        ext3_fsblk_t goal, int indirect_blks, int blks,
 518                        ext3_fsblk_t new_blocks[4], int *err)
 519{
 520        int target, i;
 521        unsigned long count = 0;
 522        int index = 0;
 523        ext3_fsblk_t current_block = 0;
 524        int ret = 0;
 525
 526        /*
 527         * Here we try to allocate the requested multiple blocks at once,
 528         * on a best-effort basis.
 529         * To build a branch, we should allocate blocks for
  530         * the indirect blocks (if not allocated yet), and at least
  531         * the first direct block of this branch.  That's the
  532         * minimum number of blocks we need to allocate (required).
 533         */
 534        target = blks + indirect_blks;
 535
 536        while (1) {
 537                count = target;
 538                /* allocating blocks for indirect blocks and direct blocks */
 539                current_block = ext3_new_blocks(handle,inode,goal,&count,err);
 540                if (*err)
 541                        goto failed_out;
 542
 543                target -= count;
 544                /* allocate blocks for indirect blocks */
 545                while (index < indirect_blks && count) {
 546                        new_blocks[index++] = current_block++;
 547                        count--;
 548                }
 549
 550                if (count > 0)
 551                        break;
 552        }
 553
 554        /* save the new block number for the first direct block */
 555        new_blocks[index] = current_block;
 556
 557        /* total number of blocks allocated for direct blocks */
 558        ret = count;
 559        *err = 0;
 560        return ret;
 561failed_out:
 562        for (i = 0; i <index; i++)
 563                ext3_free_blocks(handle, inode, new_blocks[i], 1);
 564        return ret;
 565}
 566
 567/**
 568 *        ext3_alloc_branch - allocate and set up a chain of blocks.
 569 *        @inode: owner
 570 *        @indirect_blks: number of allocated indirect blocks
 571 *        @blks: number of allocated direct blocks
 572 *        @offsets: offsets (in the blocks) to store the pointers to next.
 573 *        @branch: place to store the chain in.
 574 *
 575 *        This function allocates blocks, zeroes out all but the last one,
 576 *        links them into chain and (if we are synchronous) writes them to disk.
 577 *        In other words, it prepares a branch that can be spliced onto the
 578 *        inode. It stores the information about that chain in the branch[], in
  579 *        the same format as ext3_get_branch() would do. We are calling it after
  580 *        we have read the existing part of the chain, and partial points to the last
  581 *        triple of that (the one with zero ->key). Upon exit we have the same
 582 *        picture as after the successful ext3_get_block(), except that in one
 583 *        place chain is disconnected - *branch->p is still zero (we did not
 584 *        set the last link), but branch->key contains the number that should
 585 *        be placed into *branch->p to fill that gap.
 586 *
 587 *        If allocation fails we free all blocks we've allocated (and forget
  588 *        their buffer_heads) and return the error value from the failed
 589 *        ext3_alloc_block() (normally -ENOSPC). Otherwise we set the chain
 590 *        as described above and return 0.
 591 */
 592static int ext3_alloc_branch(handle_t *handle, struct inode *inode,
 593                        int indirect_blks, int *blks, ext3_fsblk_t goal,
 594                        int *offsets, Indirect *branch)
 595{
 596        int blocksize = inode->i_sb->s_blocksize;
 597        int i, n = 0;
 598        int err = 0;
 599        struct buffer_head *bh;
 600        int num;
 601        ext3_fsblk_t new_blocks[4];
 602        ext3_fsblk_t current_block;
 603
 604        num = ext3_alloc_blocks(handle, inode, goal, indirect_blks,
 605                                *blks, new_blocks, &err);
 606        if (err)
 607                return err;
 608
 609        branch[0].key = cpu_to_le32(new_blocks[0]);
 610        /*
 611         * metadata blocks and data blocks are allocated.
 612         */
 613        for (n = 1; n <= indirect_blks;  n++) {
 614                /*
 615                 * Get buffer_head for parent block, zero it out
 616                 * and set the pointer to new one, then send
 617                 * parent to disk.
 618                 */
 619                bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
 620                branch[n].bh = bh;
 621                lock_buffer(bh);
 622                BUFFER_TRACE(bh, "call get_create_access");
 623                err = ext3_journal_get_create_access(handle, bh);
 624                if (err) {
 625                        unlock_buffer(bh);
 626                        brelse(bh);
 627                        goto failed;
 628                }
 629
 630                memset(bh->b_data, 0, blocksize);
 631                branch[n].p = (__le32 *) bh->b_data + offsets[n];
 632                branch[n].key = cpu_to_le32(new_blocks[n]);
 633                *branch[n].p = branch[n].key;
 634                if ( n == indirect_blks) {
 635                        current_block = new_blocks[n];
 636                        /*
 637                         * End of chain, update the last new metablock of
  638                         * the chain to point to the newly allocated
  639                         * data block numbers
 640                         */
 641                        for (i=1; i < num; i++)
 642                                *(branch[n].p + i) = cpu_to_le32(++current_block);
 643                }
 644                BUFFER_TRACE(bh, "marking uptodate");
 645                set_buffer_uptodate(bh);
 646                unlock_buffer(bh);
 647
 648                BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
 649                err = ext3_journal_dirty_metadata(handle, bh);
 650                if (err)
 651                        goto failed;
 652        }
 653        *blks = num;
 654        return err;
 655failed:
 656        /* Allocation failed, free what we already allocated */
 657        for (i = 1; i <= n ; i++) {
 658                BUFFER_TRACE(branch[i].bh, "call journal_forget");
 659                ext3_journal_forget(handle, branch[i].bh);
 660        }
 661        for (i = 0; i <indirect_blks; i++)
 662                ext3_free_blocks(handle, inode, new_blocks[i], 1);
 663
 664        ext3_free_blocks(handle, inode, new_blocks[i], num);
 665
 666        return err;
 667}
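
On success the new branch is complete except for the single link that
ext3_splice_branch() will set; schematically, with hypothetical block
numbers:

    /*
     * new_blocks = {9001, 9002, 9003}, indirect_blks = 2, num = 1
     * branch[0].key = 9001, but *branch[0].p is still 0 (the gap)
     * block 9001: zeroed, one slot -> 9002
     * block 9002: zeroed, one slot -> 9003 (the data block)
     * the splice later stores branch[0].key into *branch[0].p
     */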
 668
 669/**
 670 * ext3_splice_branch - splice the allocated branch onto inode.
 671 * @inode: owner
 672 * @block: (logical) number of block we are adding
 673 * @chain: chain of indirect blocks (with a missing link - see
 674 *        ext3_alloc_branch)
 675 * @where: location of missing link
 676 * @num:   number of indirect blocks we are adding
 677 * @blks:  number of direct blocks we are adding
 678 *
 679 * This function fills the missing link and does all housekeeping needed in
 680 * inode (->i_blocks, etc.). In case of success we end up with the full
  681 * chain to the new block and return 0.
 682 */
 683static int ext3_splice_branch(handle_t *handle, struct inode *inode,
 684                        long block, Indirect *where, int num, int blks)
 685{
 686        int i;
 687        int err = 0;
 688        struct ext3_block_alloc_info *block_i;
 689        ext3_fsblk_t current_block;
 690
 691        block_i = EXT3_I(inode)->i_block_alloc_info;
 692        /*
 693         * If we're splicing into a [td]indirect block (as opposed to the
 694         * inode) then we need to get write access to the [td]indirect block
 695         * before the splice.
 696         */
 697        if (where->bh) {
 698                BUFFER_TRACE(where->bh, "get_write_access");
 699                err = ext3_journal_get_write_access(handle, where->bh);
 700                if (err)
 701                        goto err_out;
 702        }
 703        /* That's it */
 704
 705        *where->p = where->key;
 706
 707        /*
  708         * Update the host buffer_head or inode to point to the just-allocated
  709         * direct blocks
 710         */
 711        if (num == 0 && blks > 1) {
 712                current_block = le32_to_cpu(where->key) + 1;
 713                for (i = 1; i < blks; i++)
 714                        *(where->p + i ) = cpu_to_le32(current_block++);
 715        }
 716
 717        /*
 718         * update the most recently allocated logical & physical block
  719         * in i_block_alloc_info, to assist in finding the proper goal block for the next
 720         * allocation
 721         */
 722        if (block_i) {
 723                block_i->last_alloc_logical_block = block + blks - 1;
 724                block_i->last_alloc_physical_block =
 725                                le32_to_cpu(where[num].key) + blks - 1;
 726        }
 727
 728        /* We are done with atomic stuff, now do the rest of housekeeping */
 729
 730        inode->i_ctime = CURRENT_TIME_SEC;
 731        ext3_mark_inode_dirty(handle, inode);
 732
  733        /* had we spliced it onto an indirect block? */
 734        if (where->bh) {
 735                /*
 736                 * If we spliced it onto an indirect block, we haven't
 737                 * altered the inode.  Note however that if it is being spliced
 738                 * onto an indirect block at the very end of the file (the
 739                 * file is growing) then we *will* alter the inode to reflect
 740                 * the new i_size.  But that is not done here - it is done in
 741                 * generic_commit_write->__mark_inode_dirty->ext3_dirty_inode.
 742                 */
 743                jbd_debug(5, "splicing indirect only\n");
 744                BUFFER_TRACE(where->bh, "call ext3_journal_dirty_metadata");
 745                err = ext3_journal_dirty_metadata(handle, where->bh);
 746                if (err)
 747                        goto err_out;
 748        } else {
 749                /*
 750                 * OK, we spliced it into the inode itself on a direct block.
 751                 * Inode was dirtied above.
 752                 */
 753                jbd_debug(5, "splicing direct\n");
 754        }
 755        return err;
 756
 757err_out:
 758        for (i = 1; i <= num; i++) {
 759                BUFFER_TRACE(where[i].bh, "call journal_forget");
 760                ext3_journal_forget(handle, where[i].bh);
 761                ext3_free_blocks(handle,inode,le32_to_cpu(where[i-1].key),1);
 762        }
 763        ext3_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks);
 764
 765        return err;
 766}
 767
 768/*
 769 * Allocation strategy is simple: if we have to allocate something, we will
 770 * have to go the whole way to leaf. So let's do it before attaching anything
 771 * to tree, set linkage between the newborn blocks, write them if sync is
 772 * required, recheck the path, free and repeat if check fails, otherwise
 773 * set the last missing link (that will protect us from any truncate-generated
 774 * removals - all blocks on the path are immune now) and possibly force the
 775 * write on the parent block.
 776 * That has a nice additional property: no special recovery from the failed
 777 * allocations is needed - we simply release blocks and do not touch anything
 778 * reachable from inode.
 779 *
 780 * `handle' can be NULL if create == 0.
 781 *
 782 * The BKL may not be held on entry here.  Be sure to take it early.
 783 * return > 0, # of blocks mapped or allocated.
 784 * return = 0, if plain lookup failed.
 785 * return < 0, error case.
 786 */
 787int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
 788                sector_t iblock, unsigned long maxblocks,
 789                struct buffer_head *bh_result,
 790                int create, int extend_disksize)
 791{
 792        int err = -EIO;
 793        int offsets[4];
 794        Indirect chain[4];
 795        Indirect *partial;
 796        ext3_fsblk_t goal;
 797        int indirect_blks;
 798        int blocks_to_boundary = 0;
 799        int depth;
 800        struct ext3_inode_info *ei = EXT3_I(inode);
 801        int count = 0;
 802        ext3_fsblk_t first_block = 0;
 803
 804
 805        J_ASSERT(handle != NULL || create == 0);
 806        depth = ext3_block_to_path(inode,iblock,offsets,&blocks_to_boundary);
 807
 808        if (depth == 0)
 809                goto out;
 810
 811        partial = ext3_get_branch(inode, depth, offsets, chain, &err);
 812
 813        /* Simplest case - block found, no allocation needed */
 814        if (!partial) {
 815                first_block = le32_to_cpu(chain[depth - 1].key);
 816                clear_buffer_new(bh_result);
 817                count++;
 818                /*map more blocks*/
 819                while (count < maxblocks && count <= blocks_to_boundary) {
 820                        ext3_fsblk_t blk;
 821
 822                        if (!verify_chain(chain, partial)) {
 823                                /*
 824                                 * Indirect block might be removed by
 825                                 * truncate while we were reading it.
 826                                 * Handling of that case: forget what we've
 827                                 * got now. Flag the err as EAGAIN, so it
 828                                 * will reread.
 829                                 */
 830                                err = -EAGAIN;
 831                                count = 0;
 832                                break;
 833                        }
 834                        blk = le32_to_cpu(*(chain[depth-1].p + count));
 835
 836                        if (blk == first_block + count)
 837                                count++;
 838                        else
 839                                break;
 840                }
 841                if (err != -EAGAIN)
 842                        goto got_it;
 843        }
 844
 845        /* Next simple case - plain lookup or failed read of indirect block */
 846        if (!create || err == -EIO)
 847                goto cleanup;
 848
 849        mutex_lock(&ei->truncate_mutex);
 850
 851        /*
 852         * If the indirect block is missing while we are reading
  853         * the chain (ext3_get_branch() returns the -EAGAIN err), or
  854         * if the chain has been changed after we grab the semaphore
  855         * (either because another process truncated this branch, or
  856         * another get_block allocated this branch), re-grab the chain to see if
  857         * the requested block has been allocated or not.
 858         *
 859         * Since we already block the truncate/other get_block
 860         * at this point, we will have the current copy of the chain when we
 861         * splice the branch into the tree.
 862         */
 863        if (err == -EAGAIN || !verify_chain(chain, partial)) {
 864                while (partial > chain) {
 865                        brelse(partial->bh);
 866                        partial--;
 867                }
 868                partial = ext3_get_branch(inode, depth, offsets, chain, &err);
 869                if (!partial) {
 870                        count++;
 871                        mutex_unlock(&ei->truncate_mutex);
 872                        if (err)
 873                                goto cleanup;
 874                        clear_buffer_new(bh_result);
 875                        goto got_it;
 876                }
 877        }
 878
 879        /*
 880         * Okay, we need to do block allocation.  Lazily initialize the block
 881         * allocation info here if necessary
 882        */
 883        if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
 884                ext3_init_block_alloc_info(inode);
 885
 886        goal = ext3_find_goal(inode, iblock, partial);
 887
  888        /* the number of blocks needed to allocate for [d,t]indirect blocks */
 889        indirect_blks = (chain + depth) - partial - 1;
 890
 891        /*
  892         * Next look up the indirect map to count the total number of
 893         * direct blocks to allocate for this branch.
 894         */
 895        count = ext3_blks_to_allocate(partial, indirect_blks,
 896                                        maxblocks, blocks_to_boundary);
 897        /*
 898         * Block out ext3_truncate while we alter the tree
 899         */
 900        err = ext3_alloc_branch(handle, inode, indirect_blks, &count, goal,
 901                                offsets + (partial - chain), partial);
 902
 903        /*
 904         * The ext3_splice_branch call will free and forget any buffers
 905         * on the new chain if there is a failure, but that risks using
 906         * up transaction credits, especially for bitmaps where the
 907         * credits cannot be returned.  Can we handle this somehow?  We
 908         * may need to return -EAGAIN upwards in the worst case.  --sct
 909         */
 910        if (!err)
 911                err = ext3_splice_branch(handle, inode, iblock,
 912                                        partial, indirect_blks, count);
 913        /*
 914         * i_disksize growing is protected by truncate_mutex.  Don't forget to
 915         * protect it if you're about to implement concurrent
 916         * ext3_get_block() -bzzz
 917        */
 918        if (!err && extend_disksize && inode->i_size > ei->i_disksize)
 919                ei->i_disksize = inode->i_size;
 920        mutex_unlock(&ei->truncate_mutex);
 921        if (err)
 922                goto cleanup;
 923
 924        set_buffer_new(bh_result);
 925got_it:
 926        map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
 927        if (count > blocks_to_boundary)
 928                set_buffer_boundary(bh_result);
 929        err = count;
 930        /* Clean up and exit */
 931        partial = chain + depth - 1;        /* the whole chain */
 932cleanup:
 933        while (partial > chain) {
 934                BUFFER_TRACE(partial->bh, "call brelse");
 935                brelse(partial->bh);
 936                partial--;
 937        }
 938        BUFFER_TRACE(bh_result, "returned");
 939out:
 940        return err;
 941}
 942
 943/* Maximum number of blocks we map for direct IO at once. */
 944#define DIO_MAX_BLOCKS 4096
 945/*
 946 * Number of credits we need for writing DIO_MAX_BLOCKS:
 947 * We need sb + group descriptor + bitmap + inode -> 4
 948 * For B blocks with A block pointers per block we need:
 949 * 1 (triple ind.) + (B/A/A + 2) (doubly ind.) + (B/A + 2) (indirect).
 950 * If we plug in 4096 for B and 256 for A (for 1KB block size), we get 25.
 951 */
 952#define DIO_CREDITS 25
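
Spelling out the arithmetic behind the figure above (B = 4096, A = 256):

    /*
     * sb + group descriptor + bitmap + inode      =  4
     * triple indirect                             =  1
     * doubly indirect: 4096/256/256 + 2 = 0 + 2   =  2
     * indirect:        4096/256 + 2     = 16 + 2  = 18
     *                                       total = 25
     */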
 953
 954static int ext3_get_block(struct inode *inode, sector_t iblock,
 955                        struct buffer_head *bh_result, int create)
 956{
 957        handle_t *handle = ext3_journal_current_handle();
 958        int ret = 0, started = 0;
 959        unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
 960
 961        if (create && !handle) {        /* Direct IO write... */
 962                if (max_blocks > DIO_MAX_BLOCKS)
 963                        max_blocks = DIO_MAX_BLOCKS;
 964                handle = ext3_journal_start(inode, DIO_CREDITS +
 965                                2 * EXT3_QUOTA_TRANS_BLOCKS(inode->i_sb));
 966                if (IS_ERR(handle)) {
 967                        ret = PTR_ERR(handle);
 968                        goto out;
 969                }
 970                started = 1;
 971        }
 972
 973        ret = ext3_get_blocks_handle(handle, inode, iblock,
 974                                        max_blocks, bh_result, create, 0);
 975        if (ret > 0) {
 976                bh_result->b_size = (ret << inode->i_blkbits);
 977                ret = 0;
 978        }
 979        if (started)
 980                ext3_journal_stop(handle);
 981out:
 982        return ret;
 983}
 984
 985int ext3_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 986                u64 start, u64 len)
 987{
 988        return generic_block_fiemap(inode, fieinfo, start, len,
 989                                    ext3_get_block);
 990}
 991
 992/*
 993 * `handle' can be NULL if create is zero
 994 */
 995struct buffer_head *ext3_getblk(handle_t *handle, struct inode *inode,
 996                                long block, int create, int *errp)
 997{
 998        struct buffer_head dummy;
 999        int fatal = 0, err;
1000
1001        J_ASSERT(handle != NULL || create == 0);
1002
1003        dummy.b_state = 0;
1004        dummy.b_blocknr = -1000;
1005        buffer_trace_init(&dummy.b_history);
1006        err = ext3_get_blocks_handle(handle, inode, block, 1,
1007                                        &dummy, create, 1);
1008        /*
 1009         * ext3_get_blocks_handle() returns the number of blocks
 1010         * mapped, or 0 in the case of a HOLE.
1011         */
1012        if (err > 0) {
1013                if (err > 1)
1014                        WARN_ON(1);
1015                err = 0;
1016        }
1017        *errp = err;
1018        if (!err && buffer_mapped(&dummy)) {
1019                struct buffer_head *bh;
1020                bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
1021                if (!bh) {
1022                        *errp = -EIO;
1023                        goto err;
1024                }
1025                if (buffer_new(&dummy)) {
1026                        J_ASSERT(create != 0);
1027                        J_ASSERT(handle != NULL);
1028
1029                        /*
1030                         * Now that we do not always journal data, we should
1031                         * keep in mind whether this should always journal the
1032                         * new buffer as metadata.  For now, regular file
1033                         * writes use ext3_get_block instead, so it's not a
1034                         * problem.
1035                         */
1036                        lock_buffer(bh);
1037                        BUFFER_TRACE(bh, "call get_create_access");
1038                        fatal = ext3_journal_get_create_access(handle, bh);
1039                        if (!fatal && !buffer_uptodate(bh)) {
1040                                memset(bh->b_data,0,inode->i_sb->s_blocksize);
1041                                set_buffer_uptodate(bh);
1042                        }
1043                        unlock_buffer(bh);
1044                        BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
1045                        err = ext3_journal_dirty_metadata(handle, bh);
1046                        if (!fatal)
1047                                fatal = err;
1048                } else {
1049                        BUFFER_TRACE(bh, "not a new buffer");
1050                }
1051                if (fatal) {
1052                        *errp = fatal;
1053                        brelse(bh);
1054                        bh = NULL;
1055                }
1056                return bh;
1057        }
1058err:
1059        return NULL;
1060}
1061
1062struct buffer_head *ext3_bread(handle_t *handle, struct inode *inode,
1063                               int block, int create, int *err)
1064{
1065        struct buffer_head * bh;
1066
1067        bh = ext3_getblk(handle, inode, block, create, err);
1068        if (!bh)
1069                return bh;
1070        if (buffer_uptodate(bh))
1071                return bh;
1072        ll_rw_block(READ_META, 1, &bh);
1073        wait_on_buffer(bh);
1074        if (buffer_uptodate(bh))
1075                return bh;
1076        put_bh(bh);
1077        *err = -EIO;
1078        return NULL;
1079}
1080
1081static int walk_page_buffers(        handle_t *handle,
1082                                struct buffer_head *head,
1083                                unsigned from,
1084                                unsigned to,
1085                                int *partial,
1086                                int (*fn)(        handle_t *handle,
1087                                                struct buffer_head *bh))
1088{
1089        struct buffer_head *bh;
1090        unsigned block_start, block_end;
1091        unsigned blocksize = head->b_size;
1092        int err, ret = 0;
1093        struct buffer_head *next;
1094
1095        for (        bh = head, block_start = 0;
1096                ret == 0 && (bh != head || !block_start);
1097                block_start = block_end, bh = next)
1098        {
1099                next = bh->b_this_page;
1100                block_end = block_start + blocksize;
1101                if (block_end <= from || block_start >= to) {
1102                        if (partial && !buffer_uptodate(bh))
1103                                *partial = 1;
1104                        continue;
1105                }
1106                err = (*fn)(handle, bh);
1107                if (!ret)
1108                        ret = err;
1109        }
1110        return ret;
1111}
1112
1113/*
1114 * To preserve ordering, it is essential that the hole instantiation and
1115 * the data write be encapsulated in a single transaction.  We cannot
1116 * close off a transaction and start a new one between the ext3_get_block()
1117 * and the commit_write().  So doing the journal_start at the start of
1118 * prepare_write() is the right place.
1119 *
1120 * Also, this function can nest inside ext3_writepage() ->
1121 * block_write_full_page(). In that case, we *know* that ext3_writepage()
1122 * has generated enough buffer credits to do the whole page.  So we won't
1123 * block on the journal in that case, which is good, because the caller may
1124 * be PF_MEMALLOC.
1125 *
1126 * By accident, ext3 can be reentered when a transaction is open via
1127 * quota file writes.  If we were to commit the transaction while thus
1128 * reentered, there can be a deadlock - we would be holding a quota
1129 * lock, and the commit would never complete if another thread had a
1130 * transaction open and was blocking on the quota lock - a ranking
1131 * violation.
1132 *
1133 * So what we do is to rely on the fact that journal_stop/journal_start
1134 * will _not_ run commit under these circumstances because handle->h_ref
1135 * is elevated.  We'll still have enough credits for the tiny quotafile
1136 * write.
1137 */
1138static int do_journal_get_write_access(handle_t *handle,
1139                                        struct buffer_head *bh)
1140{
1141        if (!buffer_mapped(bh) || buffer_freed(bh))
1142                return 0;
1143        return ext3_journal_get_write_access(handle, bh);
1144}
1145
1146static int ext3_write_begin(struct file *file, struct address_space *mapping,
1147                                loff_t pos, unsigned len, unsigned flags,
1148                                struct page **pagep, void **fsdata)
1149{
1150        struct inode *inode = mapping->host;
1151        int ret, needed_blocks = ext3_writepage_trans_blocks(inode);
1152        handle_t *handle;
1153        int retries = 0;
1154        struct page *page;
1155        pgoff_t index;
1156        unsigned from, to;
1157
1158        index = pos >> PAGE_CACHE_SHIFT;
1159        from = pos & (PAGE_CACHE_SIZE - 1);
1160        to = from + len;
1161
1162retry:
1163        page = __grab_cache_page(mapping, index);
1164        if (!page)
1165                return -ENOMEM;
1166        *pagep = page;
1167
1168        handle = ext3_journal_start(inode, needed_blocks);
1169        if (IS_ERR(handle)) {
1170                unlock_page(page);
1171                page_cache_release(page);
1172                ret = PTR_ERR(handle);
1173                goto out;
1174        }
1175        ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
1176                                                        ext3_get_block);
1177        if (ret)
1178                goto write_begin_failed;
1179
1180        if (ext3_should_journal_data(inode)) {
1181                ret = walk_page_buffers(handle, page_buffers(page),
1182                                from, to, NULL, do_journal_get_write_access);
1183        }
1184write_begin_failed:
1185        if (ret) {
1186                ext3_journal_stop(handle);
1187                unlock_page(page);
1188                page_cache_release(page);
1189                /*
1190                 * block_write_begin may have instantiated a few blocks
1191                 * outside i_size.  Trim these off again. Don't need
1192                 * i_size_read because we hold i_mutex.
1193                 */
1194                if (pos + len > inode->i_size)
1195                        vmtruncate(inode, inode->i_size);
1196        }
1197        if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
1198                goto retry;
1199out:
1200        return ret;
1201}
1202
1203
1204int ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
1205{
1206        int err = journal_dirty_data(handle, bh);
1207        if (err)
1208                ext3_journal_abort_handle(__func__, __func__,
1209                                                bh, handle, err);
1210        return err;
1211}
1212
1213/* For write_end() in data=journal mode */
1214static int write_end_fn(handle_t *handle, struct buffer_head *bh)
1215{
1216        if (!buffer_mapped(bh) || buffer_freed(bh))
1217                return 0;
1218        set_buffer_uptodate(bh);
1219        return ext3_journal_dirty_metadata(handle, bh);
1220}
1221
1222/*
1223 * Generic write_end handler for ordered and writeback ext3 journal modes.
1224 * We can't use generic_write_end, because that unlocks the page and we need to
1225 * unlock the page after ext3_journal_stop, but ext3_journal_stop must run
1226 * after block_write_end.
1227 */
1228static int ext3_generic_write_end(struct file *file,
1229                                struct address_space *mapping,
1230                                loff_t pos, unsigned len, unsigned copied,
1231                                struct page *page, void *fsdata)
1232{
1233        struct inode *inode = file->f_mapping->host;
1234
1235        copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
1236
1237        if (pos+copied > inode->i_size) {
1238                i_size_write(inode, pos+copied);
1239                mark_inode_dirty(inode);
1240        }
1241
1242        return copied;
1243}
1244
1245/*
 1246 * We need to pick up the new inode size which generic_commit_write gave us.
 1247 * `file' can be NULL - e.g. when called from page_symlink().
1248 *
 1249 * ext3 never places buffers on inode->i_mapping->private_list.  Metadata
1250 * buffers are managed internally.
1251 */
1252static int ext3_ordered_write_end(struct file *file,
1253                                struct address_space *mapping,
1254                                loff_t pos, unsigned len, unsigned copied,
1255                                struct page *page, void *fsdata)
1256{
1257        handle_t *handle = ext3_journal_current_handle();
1258        struct inode *inode = file->f_mapping->host;
1259        unsigned from, to;
1260        int ret = 0, ret2;
1261
1262        from = pos & (PAGE_CACHE_SIZE - 1);
1263        to = from + len;
1264
1265        ret = walk_page_buffers(handle, page_buffers(page),
1266                from, to, NULL, ext3_journal_dirty_data);
1267
1268        if (ret == 0) {
1269                /*
1270                 * generic_write_end() will run mark_inode_dirty() if i_size
1271                 * changes.  So let's piggyback the i_disksize mark_inode_dirty
1272                 * into that.
1273                 */
1274                loff_t new_i_size;
1275
1276                new_i_size = pos + copied;
1277                if (new_i_size > EXT3_I(inode)->i_disksize)
1278                        EXT3_I(inode)->i_disksize = new_i_size;
1279                ret2 = ext3_generic_write_end(file, mapping, pos, len, copied,
1280                                                        page, fsdata);
1281                copied = ret2;
1282                if (ret2 < 0)
1283                        ret = ret2;
1284        }
1285        ret2 = ext3_journal_stop(handle);
1286        if (!ret)
1287                ret = ret2;
1288        unlock_page(page);
1289        page_cache_release(page);
1290
1291        return ret ? ret : copied;
1292}
1293
1294static int ext3_writeback_write_end(struct file *file,
1295                                struct address_space *mapping,
1296                                loff_t pos, unsigned len, unsigned copied,
1297                                struct page *page, void *fsdata)
1298{
1299        handle_t *handle = ext3_journal_current_handle();
1300        struct inode *inode = file->f_mapping->host;
1301        int ret = 0, ret2;
1302        loff_t new_i_size;
1303
1304        new_i_size = pos + copied;
1305        if (new_i_size > EXT3_I(inode)->i_disksize)
1306                EXT3_I(inode)->i_disksize = new_i_size;
1307
1308        ret2 = ext3_generic_write_end(file, mapping, pos, len, copied,
1309                                                        page, fsdata);
1310        copied = ret2;
1311        if (ret2 < 0)
1312                ret = ret2;
1313
1314        ret2 = ext3_journal_stop(handle);
1315        if (!ret)
1316                ret = ret2;
1317        unlock_page(page);
1318        page_cache_release(page);
1319
1320        return ret ? ret : copied;
1321}
1322
1323static int ext3_journalled_write_end(struct file *file,
1324                                struct address_space *mapping,
1325                                loff_t pos, unsigned len, unsigned copied,
1326                                struct page *page, void *fsdata)
1327{
1328        handle_t *handle = ext3_journal_current_handle();
1329        struct inode *inode = mapping->host;
1330        int ret = 0, ret2;
1331        int partial = 0;
1332        unsigned from, to;
1333
1334        from = pos & (PAGE_CACHE_SIZE - 1);
1335        to = from + len;
1336
1337        if (copied < len) {
1338                if (!PageUptodate(page))
1339                        copied = 0;
1340                page_zero_new_buffers(page, from+copied, to);
1341        }
1342
1343        ret = walk_page_buffers(handle, page_buffers(page), from,
1344                                to, &partial, write_end_fn);
1345        if (!partial)
1346                SetPageUptodate(page);
1347        if (pos+copied > inode->i_size)
1348                i_size_write(inode, pos+copied);
1349        EXT3_I(inode)->i_state |= EXT3_STATE_JDATA;
1350        if (inode->i_size > EXT3_I(inode)->i_disksize) {
1351                EXT3_I(inode)->i_disksize = inode->i_size;
1352                ret2 = ext3_mark_inode_dirty(handle, inode);
1353                if (!ret)
1354                        ret = ret2;
1355        }
1356
1357        ret2 = ext3_journal_stop(handle);
1358        if (!ret)
1359                ret = ret2;
1360        unlock_page(page);
1361        page_cache_release(page);
1362
1363        return ret ? ret : copied;
1364}
1365
1366/*
1367 * bmap() is special.  It gets used by applications such as lilo and by
1368 * the swapper to find the on-disk block of a specific piece of data.
1369 *
1370 * Naturally, this is dangerous if the block concerned is still in the
1371 * journal.  If somebody makes a swapfile on an ext3 data-journaling
1372 * filesystem and enables swap, then they may get a nasty shock when the
1373 * data getting swapped to that swapfile suddenly gets overwritten by
1374 * the original zeros written out previously to the journal and
1375 * awaiting writeback in the kernel's buffer cache.
1376 *
1377 * So, if we see any bmap calls here on a modified, data-journaled file,
1378 * take extra steps to flush any blocks which might be in the cache.
1379 */
1380static sector_t ext3_bmap(struct address_space *mapping, sector_t block)
1381{
1382        struct inode *inode = mapping->host;
1383        journal_t *journal;
1384        int err;
1385
1386        if (EXT3_I(inode)->i_state & EXT3_STATE_JDATA) {
1387                /*
1388                 * This is a REALLY heavyweight approach, but the use of
1389                 * bmap on dirty files is expected to be extremely rare:
1390                 * only if we run lilo or swapon on a freshly made file
1391                 * do we expect this to happen.
1392                 *
1393                 * (bmap requires CAP_SYS_RAWIO so this does not
1394                 * represent an unprivileged user DOS attack --- we'd be
1395                 * in trouble if mortal users could trigger this path at
1396                 * will.)
1397                 *
1398                 * NB. EXT3_STATE_JDATA is not set on files other than
1399                 * regular files.  If somebody wants to bmap a directory
1400                 * or symlink and gets confused because the buffer
1401                 * hasn't yet been flushed to disk, they deserve
1402                 * everything they get.
1403                 */
1404
1405                EXT3_I(inode)->i_state &= ~EXT3_STATE_JDATA;
1406                journal = EXT3_JOURNAL(inode);
1407                journal_lock_updates(journal);
1408                err = journal_flush(journal);
1409                journal_unlock_updates(journal);
1410
1411                if (err)
1412                        return 0;
1413        }
1414
1415        return generic_block_bmap(mapping, block, ext3_get_block);
1416}
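/*
 * [Editorial sketch, not part of the kernel source] The way user space
 * reaches ext3_bmap() is the FIBMAP ioctl -- which is what lilo uses, as
 * the comment above notes.  A minimal probe (file path is hypothetical;
 * needs CAP_SYS_RAWIO):
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* FIBMAP */

int main(void)
{
	int fd = open("/mnt/ext3/vmlinuz", O_RDONLY);	/* hypothetical path */
	int blk = 0;		/* logical block index in, physical block out */

	if (fd < 0 || ioctl(fd, FIBMAP, &blk) < 0)
		return 1;
	printf("logical block 0 is on-disk block %d\n", blk);
	close(fd);
	return 0;
}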
1417
1418static int bget_one(handle_t *handle, struct buffer_head *bh)
1419{
1420        get_bh(bh);
1421        return 0;
1422}
1423
1424static int bput_one(handle_t *handle, struct buffer_head *bh)
1425{
1426        put_bh(bh);
1427        return 0;
1428}
1429
1430static int journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh)
1431{
1432        if (buffer_mapped(bh))
1433                return ext3_journal_dirty_data(handle, bh);
1434        return 0;
1435}
1436
1437/*
1438 * Note that we always start a transaction even if we're not journalling
1439 * data.  This is to preserve ordering: any hole instantiation within
1440 * __block_write_full_page -> ext3_get_block() should be journalled
1441 * along with the data so we don't crash and then get metadata which
1442 * refers to old data.
1443 *
1444 * In all journalling modes block_write_full_page() will start the I/O.
1445 *
1446 * Problem:
1447 *
1448 *        ext3_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
1449 *                ext3_writepage()
1450 *
1451 * Similar for:
1452 *
1453 *        ext3_file_write() -> generic_file_write() -> __alloc_pages() -> ...
1454 *
1455 * Same applies to ext3_get_block().  We will deadlock on various things like
1456 * lock_journal and i_truncate_mutex.
1457 *
1458 * Setting PF_MEMALLOC here doesn't work - too many internal memory
1459 * allocations fail.
1460 *
1461 * 16May01: If we're reentered then journal_current_handle() will be
1462 *            non-zero. We simply *return*.
1463 *
1464 * 1 July 2001: @@@ FIXME:
1465 *   In journalled data mode, a data buffer may be metadata against the
1466 *   current transaction.  But the same file is part of a shared mapping
1467 *   and someone does a writepage() on it.
1468 *
1469 *   We will move the buffer onto the async_data list, but *after* it has
1470 *   been dirtied. So there's a small window where we have dirty data on
1471 *   BJ_Metadata.
1472 *
1473 *   Note that this only applies to the last partial page in the file.  The
1474 *   bit which block_write_full_page() uses prepare/commit for.  (That's
1475 *   broken code anyway: it's wrong for msync()).
1476 *
1477 *   It's a rare case: affects the final partial page, for journalled data
1478 *   where the file is subject to both write() and writepage() in the same
1479 *   transaction.  To fix it we'll need a custom block_write_full_page().
1480 *   We'll probably need that anyway for journalling writepage() output.
1481 *
1482 * We don't honour synchronous mounts for writepage().  That would be
1483 * disastrous.  Any write() or metadata operation will sync the fs for
1484 * us.
1485 *
1486 * AKPM2: if all the page's buffers are mapped to disk and !data=journal,
1487 * we don't need to open a transaction here.
1488 */
1489static int ext3_ordered_writepage(struct page *page,
1490                                struct writeback_control *wbc)
1491{
1492        struct inode *inode = page->mapping->host;
1493        struct buffer_head *page_bufs;
1494        handle_t *handle = NULL;
1495        int ret = 0;
1496        int err;
1497
1498        J_ASSERT(PageLocked(page));
1499
1500        /*
1501         * We give up here if we're reentered, because it might be for a
1502         * different filesystem.
1503         */
1504        if (ext3_journal_current_handle())
1505                goto out_fail;
1506
1507        handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
1508
1509        if (IS_ERR(handle)) {
1510                ret = PTR_ERR(handle);
1511                goto out_fail;
1512        }
1513
1514        if (!page_has_buffers(page)) {
1515                create_empty_buffers(page, inode->i_sb->s_blocksize,
1516                                (1 << BH_Dirty)|(1 << BH_Uptodate));
1517        }
1518        page_bufs = page_buffers(page);
1519        walk_page_buffers(handle, page_bufs, 0,
1520                        PAGE_CACHE_SIZE, NULL, bget_one);
1521
1522        ret = block_write_full_page(page, ext3_get_block, wbc);
1523
1524        /*
1525         * The page can become unlocked at any point now, and
1526         * truncate can then come in and change things.  So we
1527         * can't touch *page from now on.  But *page_bufs is
1528         * safe due to elevated refcount.
1529         */
1530
1531        /*
1532         * And attach them to the current transaction.  But only if
1533         * block_write_full_page() succeeded.  Otherwise they are unmapped,
1534         * and generally junk.
1535         */
1536        if (ret == 0) {
1537                err = walk_page_buffers(handle, page_bufs, 0, PAGE_CACHE_SIZE,
1538                                        NULL, journal_dirty_data_fn);
1539                if (!ret)
1540                        ret = err;
1541        }
1542        walk_page_buffers(handle, page_bufs, 0,
1543                        PAGE_CACHE_SIZE, NULL, bput_one);
1544        err = ext3_journal_stop(handle);
1545        if (!ret)
1546                ret = err;
1547        return ret;
1548
1549out_fail:
1550        redirty_page_for_writepage(wbc, page);
1551        unlock_page(page);
1552        return ret;
1553}
1554
1555static int ext3_writeback_writepage(struct page *page,
1556                                struct writeback_control *wbc)
1557{
1558        struct inode *inode = page->mapping->host;
1559        handle_t *handle = NULL;
1560        int ret = 0;
1561        int err;
1562
1563        if (ext3_journal_current_handle())
1564                goto out_fail;
1565
1566        handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
1567        if (IS_ERR(handle)) {
1568                ret = PTR_ERR(handle);
1569                goto out_fail;
1570        }
1571
1572        if (test_opt(inode->i_sb, NOBH) && ext3_should_writeback_data(inode))
1573                ret = nobh_writepage(page, ext3_get_block, wbc);
1574        else
1575                ret = block_write_full_page(page, ext3_get_block, wbc);
1576
1577        err = ext3_journal_stop(handle);
1578        if (!ret)
1579                ret = err;
1580        return ret;
1581
1582out_fail:
1583        redirty_page_for_writepage(wbc, page);
1584        unlock_page(page);
1585        return ret;
1586}
1587
1588static int ext3_journalled_writepage(struct page *page,
1589                                struct writeback_control *wbc)
1590{
1591        struct inode *inode = page->mapping->host;
1592        handle_t *handle = NULL;
1593        int ret = 0;
1594        int err;
1595
1596        if (ext3_journal_current_handle())
1597                goto no_write;
1598
1599        handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
1600        if (IS_ERR(handle)) {
1601                ret = PTR_ERR(handle);
1602                goto no_write;
1603        }
1604
1605        if (!page_has_buffers(page) || PageChecked(page)) {
1606                /*
1607                 * It's mmapped pagecache.  Add buffers and journal it.  There
1608                 * doesn't seem much point in redirtying the page here.
1609                 */
1610                ClearPageChecked(page);
1611                ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE,
1612                                        ext3_get_block);
1613                if (ret != 0) {
1614                        ext3_journal_stop(handle);
1615                        goto out_unlock;
1616                }
1617                ret = walk_page_buffers(handle, page_buffers(page), 0,
1618                        PAGE_CACHE_SIZE, NULL, do_journal_get_write_access);
1619
1620                err = walk_page_buffers(handle, page_buffers(page), 0,
1621                                PAGE_CACHE_SIZE, NULL, write_end_fn);
1622                if (ret == 0)
1623                        ret = err;
1624                EXT3_I(inode)->i_state |= EXT3_STATE_JDATA;
1625                unlock_page(page);
1626        } else {
1627                /*
1628                 * It may be a page full of checkpoint-mode buffers.  We don't
1629                 * really know unless we go poke around in the buffer_heads.
1630                 * But block_write_full_page will do the right thing.
1631                 */
1632                ret = block_write_full_page(page, ext3_get_block, wbc);
1633        }
1634        err = ext3_journal_stop(handle);
1635        if (!ret)
1636                ret = err;
1637out:
1638        return ret;
1639
1640no_write:
1641        redirty_page_for_writepage(wbc, page);
1642out_unlock:
1643        unlock_page(page);
1644        goto out;
1645}
1646
1647static int ext3_readpage(struct file *file, struct page *page)
1648{
1649        return mpage_readpage(page, ext3_get_block);
1650}
1651
1652static int
1653ext3_readpages(struct file *file, struct address_space *mapping,
1654                struct list_head *pages, unsigned nr_pages)
1655{
1656        return mpage_readpages(mapping, pages, nr_pages, ext3_get_block);
1657}
1658
1659static void ext3_invalidatepage(struct page *page, unsigned long offset)
1660{
1661        journal_t *journal = EXT3_JOURNAL(page->mapping->host);
1662
1663        /*
1664         * If it's a full truncate we just forget about the pending dirtying
1665         */
1666        if (offset == 0)
1667                ClearPageChecked(page);
1668
1669        journal_invalidatepage(journal, page, offset);
1670}
1671
1672static int ext3_releasepage(struct page *page, gfp_t wait)
1673{
1674        journal_t *journal = EXT3_JOURNAL(page->mapping->host);
1675
1676        WARN_ON(PageChecked(page));
1677        if (!page_has_buffers(page))
1678                return 0;
1679        return journal_try_to_free_buffers(journal, page, wait);
1680}
1681
1682/*
1683 * If the O_DIRECT write will extend the file then add this inode to the
1684 * orphan list.  So recovery will truncate it back to the original size
1685 * if the machine crashes during the write.
1686 *
1687 * If the O_DIRECT write is instantiating holes inside i_size and the machine
1688 * crashes then stale disk data _may_ be exposed inside the file. But current
1689 * VFS code falls back into the buffered path in that case so we are safe.
1690 */
1691static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
1692                        const struct iovec *iov, loff_t offset,
1693                        unsigned long nr_segs)
1694{
1695        struct file *file = iocb->ki_filp;
1696        struct inode *inode = file->f_mapping->host;
1697        struct ext3_inode_info *ei = EXT3_I(inode);
1698        handle_t *handle;
1699        ssize_t ret;
1700        int orphan = 0;
1701        size_t count = iov_length(iov, nr_segs);
1702
1703        if (rw == WRITE) {
1704                loff_t final_size = offset + count;
1705
1706                if (final_size > inode->i_size) {
1707                        /* Credits for sb + inode write */
1708                        handle = ext3_journal_start(inode, 2);
1709                        if (IS_ERR(handle)) {
1710                                ret = PTR_ERR(handle);
1711                                goto out;
1712                        }
1713                        ret = ext3_orphan_add(handle, inode);
1714                        if (ret) {
1715                                ext3_journal_stop(handle);
1716                                goto out;
1717                        }
1718                        orphan = 1;
1719                        ei->i_disksize = inode->i_size;
1720                        ext3_journal_stop(handle);
1721                }
1722        }
1723
1724        ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
1725                                 offset, nr_segs,
1726                                 ext3_get_block, NULL);
1727
1728        if (orphan) {
1729                int err;
1730
1731                /* Credits for sb + inode write */
1732                handle = ext3_journal_start(inode, 2);
1733                if (IS_ERR(handle)) {
1734                        /* This is really bad luck. We've written the data
1735                         * but cannot extend i_size. Bail out and pretend
1736                         * the write failed... */
1737                        ret = PTR_ERR(handle);
1738                        goto out;
1739                }
1740                if (inode->i_nlink)
1741                        ext3_orphan_del(handle, inode);
1742                if (ret > 0) {
1743                        loff_t end = offset + ret;
1744                        if (end > inode->i_size) {
1745                                ei->i_disksize = end;
1746                                i_size_write(inode, end);
1747                                /*
1748                                 * We're going to return a positive `ret'
1749                                 * here due to non-zero-length I/O, so there's
1750                                 * no way of reporting error returns from
1751                                 * ext3_mark_inode_dirty() to userspace.  So
1752                                 * ignore it.
1753                                 */
1754                                ext3_mark_inode_dirty(handle, inode);
1755                        }
1756                }
1757                err = ext3_journal_stop(handle);
1758                if (ret == 0)
1759                        ret = err;
1760        }
1761out:
1762        return ret;
1763}
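/*
 * [Editorial sketch, not part of the kernel source] The case the orphan
 * list protects against above is an extending O_DIRECT write.  From user
 * space that looks roughly like this (path is hypothetical; O_DIRECT
 * requires a block-aligned buffer, offset and length):
 */
#define _GNU_SOURCE		/* for O_DIRECT */
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	void *buf;
	int fd = open("/mnt/ext3/data", O_WRONLY | O_CREAT | O_DIRECT, 0644);

	if (fd < 0 || posix_memalign(&buf, 4096, 4096) != 0)
		return 1;
	memset(buf, 0xaa, 4096);
	/* i_size grows from 0 to 4096: the inode sits on the orphan list
	 * for the duration of the write, exactly as coded above */
	if (pwrite(fd, buf, 4096, 0) != 4096)
		return 1;
	close(fd);
	free(buf);
	return 0;
}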
1764
1765/*
1766 * Pages can be marked dirty completely asynchronously from ext3's journalling
1767 * activity.  By filemap_sync_pte(), try_to_unmap_one(), etc.  We cannot do
1768 * much here because ->set_page_dirty is called under VFS locks.  The page is
1769 * not necessarily locked.
1770 *
1771 * We cannot just dirty the page and leave attached buffers clean, because the
1772 * buffers' dirty state is "definitive".  We cannot just set the buffers dirty
1773 * or jbddirty because all the journalling code will explode.
1774 *
1775 * So what we do is to mark the page "pending dirty" and next time writepage
1776 * is called, propagate that into the buffers appropriately.
1777 */
1778static int ext3_journalled_set_page_dirty(struct page *page)
1779{
1780        SetPageChecked(page);
1781        return __set_page_dirty_nobuffers(page);
1782}
1783
1784static const struct address_space_operations ext3_ordered_aops = {
1785        .readpage                = ext3_readpage,
1786        .readpages                = ext3_readpages,
1787        .writepage                = ext3_ordered_writepage,
1788        .sync_page                = block_sync_page,
1789        .write_begin                = ext3_write_begin,
1790        .write_end                = ext3_ordered_write_end,
1791        .bmap                        = ext3_bmap,
1792        .invalidatepage                = ext3_invalidatepage,
1793        .releasepage                = ext3_releasepage,
1794        .direct_IO                = ext3_direct_IO,
1795        .migratepage                = buffer_migrate_page,
1796        .is_partially_uptodate  = block_is_partially_uptodate,
1797};
1798
1799static const struct address_space_operations ext3_writeback_aops = {
1800        .readpage                = ext3_readpage,
1801        .readpages                = ext3_readpages,
1802        .writepage                = ext3_writeback_writepage,
1803        .sync_page                = block_sync_page,
1804        .write_begin                = ext3_write_begin,
1805        .write_end                = ext3_writeback_write_end,
1806        .bmap                        = ext3_bmap,
1807        .invalidatepage                = ext3_invalidatepage,
1808        .releasepage                = ext3_releasepage,
1809        .direct_IO                = ext3_direct_IO,
1810        .migratepage                = buffer_migrate_page,
1811        .is_partially_uptodate  = block_is_partially_uptodate,
1812};
1813
1814static const struct address_space_operations ext3_journalled_aops = {
1815        .readpage                = ext3_readpage,
1816        .readpages                = ext3_readpages,
1817        .writepage                = ext3_journalled_writepage,
1818        .sync_page                = block_sync_page,
1819        .write_begin                = ext3_write_begin,
1820        .write_end                = ext3_journalled_write_end,
1821        .set_page_dirty                = ext3_journalled_set_page_dirty,
1822        .bmap                        = ext3_bmap,
1823        .invalidatepage                = ext3_invalidatepage,
1824        .releasepage                = ext3_releasepage,
1825        .is_partially_uptodate  = block_is_partially_uptodate,
1826};
1827
1828void ext3_set_aops(struct inode *inode)
1829{
1830        if (ext3_should_order_data(inode))
1831                inode->i_mapping->a_ops = &ext3_ordered_aops;
1832        else if (ext3_should_writeback_data(inode))
1833                inode->i_mapping->a_ops = &ext3_writeback_aops;
1834        else
1835                inode->i_mapping->a_ops = &ext3_journalled_aops;
1836}
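/*
 * [Editorial note, not part of the kernel source] Which of the three
 * tables ext3_set_aops() installs follows from the data= mount option.
 * For instance, a mount(2) call like the sketch below (device and mount
 * point are hypothetical) makes every regular file on the mount use
 * ext3_journalled_aops:
 */
#include <sys/mount.h>

int main(void)
{
	return mount("/dev/sdb1", "/mnt/ext3", "ext3", 0, "data=journal");
}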
1837
1838/*
1839 * ext3_block_truncate_page() zeroes out a mapping from file offset `from'
1840 * up to the end of the block which corresponds to `from'.
1841 * This is required during truncate. We need to physically zero the tail end
1842 * of that block so it doesn't yield old data if the file is later grown.
1843 */
1844static int ext3_block_truncate_page(handle_t *handle, struct page *page,
1845                struct address_space *mapping, loff_t from)
1846{
1847        ext3_fsblk_t index = from >> PAGE_CACHE_SHIFT;
1848        unsigned offset = from & (PAGE_CACHE_SIZE-1);
1849        unsigned blocksize, iblock, length, pos;
1850        struct inode *inode = mapping->host;
1851        struct buffer_head *bh;
1852        int err = 0;
1853
1854        blocksize = inode->i_sb->s_blocksize;
1855        length = blocksize - (offset & (blocksize - 1));
1856        iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
1857
1858        /*
1859         * For "nobh" option,  we can only work if we don't need to
1860         * read-in the page - otherwise we create buffers to do the IO.
1861         */
1862        if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) &&
1863             ext3_should_writeback_data(inode) && PageUptodate(page)) {
1864                zero_user(page, offset, length);
1865                set_page_dirty(page);
1866                goto unlock;
1867        }
1868
1869        if (!page_has_buffers(page))
1870                create_empty_buffers(page, blocksize, 0);
1871
1872        /* Find the buffer that contains "offset" */
1873        bh = page_buffers(page);
1874        pos = blocksize;
1875        while (offset >= pos) {
1876                bh = bh->b_this_page;
1877                iblock++;
1878                pos += blocksize;
1879        }
1880
1881        err = 0;
1882        if (buffer_freed(bh)) {
1883                BUFFER_TRACE(bh, "freed: skip");
1884                goto unlock;
1885        }
1886
1887        if (!buffer_mapped(bh)) {
1888                BUFFER_TRACE(bh, "unmapped");
1889                ext3_get_block(inode, iblock, bh, 0);
1890                /* unmapped? It's a hole - nothing to do */
1891                if (!buffer_mapped(bh)) {
1892                        BUFFER_TRACE(bh, "still unmapped");
1893                        goto unlock;
1894                }
1895        }
1896
1897        /* Ok, it's mapped. Make sure it's up-to-date */
1898        if (PageUptodate(page))
1899                set_buffer_uptodate(bh);
1900
1901        if (!buffer_uptodate(bh)) {
1902                err = -EIO;
1903                ll_rw_block(READ, 1, &bh);
1904                wait_on_buffer(bh);
1905                /* Uhhuh. Read error. Complain and punt. */
1906                if (!buffer_uptodate(bh))
1907                        goto unlock;
1908        }
1909
1910        if (ext3_should_journal_data(inode)) {
1911                BUFFER_TRACE(bh, "get write access");
1912                err = ext3_journal_get_write_access(handle, bh);
1913                if (err)
1914                        goto unlock;
1915        }
1916
1917        zero_user(page, offset, length);
1918        BUFFER_TRACE(bh, "zeroed end of block");
1919
1920        err = 0;
1921        if (ext3_should_journal_data(inode)) {
1922                err = ext3_journal_dirty_metadata(handle, bh);
1923        } else {
1924                if (ext3_should_order_data(inode))
1925                        err = ext3_journal_dirty_data(handle, bh);
1926                mark_buffer_dirty(bh);
1927        }
1928
1929unlock:
1930        unlock_page(page);
1931        page_cache_release(page);
1932        return err;
1933}
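/*
 * [Editorial sketch, not part of the kernel source] The index arithmetic
 * at the top of ext3_block_truncate_page(), worked through in user space
 * for a 4096-byte page and a 1024-byte block size, truncating at file
 * offset 5000:
 */
#include <stdio.h>

int main(void)
{
	unsigned long from = 5000;
	unsigned page_shift = 12, blkbits = 10;		/* 4k page, 1k blocks */
	unsigned blocksize = 1 << blkbits;

	unsigned long index = from >> page_shift;		  /* page 1 */
	unsigned offset = from & ((1 << page_shift) - 1);	  /* byte 904 in page */
	unsigned length = blocksize - (offset & (blocksize - 1)); /* zero 120 bytes */
	unsigned long iblock = index << (page_shift - blkbits);	  /* fs block 4 */

	printf("page %lu, in-page offset %u, zero %u bytes, page starts at fs block %lu\n",
	       index, offset, length, iblock);
	return 0;
}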
1934
1935/*
1936 * Probably it should be a library function... search for first non-zero word
1937 * or memcmp with zero_page, whatever is better for particular architecture.
1938 * Linus?
1939 */
1940static inline int all_zeroes(__le32 *p, __le32 *q)
1941{
1942        while (p < q)
1943                if (*p++)
1944                        return 0;
1945        return 1;
1946}
1947
1948/**
1949 *        ext3_find_shared - find the indirect blocks for partial truncation.
1950 *        @inode:          inode in question
1951 *        @depth:          depth of the affected branch
1952 *        @offsets: offsets of pointers in that branch (see ext3_block_to_path)
1953 *        @chain:          place to store the pointers to partial indirect blocks
1954 *        @top:          place to the (detached) top of branch
1955 *
1956 *        This is a helper function used by ext3_truncate().
1957 *
1958 *        When we do truncate() we may have to clean the ends of several
1959 *        indirect blocks but leave the blocks themselves alive. Block is
1960 *        partially truncated if some data below the new i_size is referenced
1961 *        from it (and it is on the path to the first completely truncated
1962 *        data block, indeed).  We have to free the top of that path along
1963 *        with everything to the right of the path. Since no allocation
1964 *        past the truncation point is possible until ext3_truncate()
1965 *        finishes, we may safely do the latter, but top of branch may
1966 *        require special attention - pageout below the truncation point
1967 *        might try to populate it.
1968 *
1969 *        We atomically detach the top of branch from the tree, store the
1970 *        block number of its root in *@top, pointers to buffer_heads of
1971 *        partially truncated blocks - in @chain[].bh and pointers to
1972 *        their last elements that should not be removed - in
1973 *        @chain[].p. Return value is the pointer to last filled element
1974 *        of @chain.
1975 *
1976 *        The work of actually freeing the subtrees is left to the caller:
1977 *                a) free the subtree starting from *@top
1978 *                b) free the subtrees whose roots are stored in
1979 *                        (@chain[i].p+1 .. end of @chain[i].bh->b_data)
1980 *                c) free the subtrees growing from the inode past the @chain[0].
1981 *                        (no partially truncated stuff there).  */
1982
1983static Indirect *ext3_find_shared(struct inode *inode, int depth,
1984                        int offsets[4], Indirect chain[4], __le32 *top)
1985{
1986        Indirect *partial, *p;
1987        int k, err;
1988
1989        *top = 0;
1990        /* Make k index the deepest non-null offset + 1 */
1991        for (k = depth; k > 1 && !offsets[k-1]; k--)
1992                ;
1993        partial = ext3_get_branch(inode, k, offsets, chain, &err);
1994        /* Writer: pointers */
1995        if (!partial)
1996                partial = chain + k-1;
1997        /*
1998         * If the branch acquired continuation since we've looked at it -
1999         * fine, it should all survive and (new) top doesn't belong to us.
2000         */
2001        if (!partial->key && *partial->p)
2002                /* Writer: end */
2003                goto no_top;
2004        for (p=partial; p>chain && all_zeroes((__le32*)p->bh->b_data,p->p); p--)
2005                ;
2006        /*
2007         * OK, we've found the last block that must survive. The rest of our
2008         * branch should be detached before unlocking. However, if that rest
2009         * of branch is all ours and does not grow immediately from the inode
2010         * it's easier to cheat and just decrement partial->p.
2011         */
2012        if (p == chain + k - 1 && p > chain) {
2013                p->p--;
2014        } else {
2015                *top = *p->p;
2016                /* Nope, don't do this in ext3.  Must leave the tree intact */
2017#if 0
2018                *p->p = 0;
2019#endif
2020        }
2021        /* Writer: end */
2022
2023        while (partial > p) {
2024                brelse(partial->bh);
2025                partial--;
2026        }
2027no_top:
2028        return partial;
2029}
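/*
 * [Editorial sketch, not part of the kernel source] The offsets[4] array
 * that ext3_find_shared() consumes is produced by ext3_block_to_path()
 * (earlier in this file).  Its logic, condensed into user space for
 * 1024-byte blocks (256 pointers per indirect block, 12 direct blocks):
 */
#include <stdio.h>

int main(void)
{
	long block = 300;		/* an arbitrary logical block number */
	const long ndir = 12, ptrs = 256;
	int offsets[4], n = 0, i;

	if (block < ndir) {
		offsets[n++] = block;
	} else if ((block -= ndir) < ptrs) {
		offsets[n++] = 12;	/* EXT3_IND_BLOCK */
		offsets[n++] = block;
	} else if ((block -= ptrs) < ptrs * ptrs) {
		offsets[n++] = 13;	/* EXT3_DIND_BLOCK */
		offsets[n++] = block / ptrs;
		offsets[n++] = block % ptrs;
	} else {
		block -= ptrs * ptrs;
		offsets[n++] = 14;	/* EXT3_TIND_BLOCK */
		offsets[n++] = block / (ptrs * ptrs);
		offsets[n++] = (block / ptrs) % ptrs;
		offsets[n++] = block % ptrs;
	}
	for (i = 0; i < n; i++)		/* block 300: depth 3, path 13/0/32 */
		printf("offsets[%d] = %d\n", i, offsets[i]);
	return 0;
}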
2030
2031/*
2032 * Zero a number of block pointers in either an inode or an indirect block.
2033 * If we restart the transaction we must again get write access to the
2034 * indirect block for further modification.
2035 *
2036 * We release `count' blocks on disk, but (last - first) may be greater
2037 * than `count' because there can be holes in there.
2038 */
2039static void ext3_clear_blocks(handle_t *handle, struct inode *inode,
2040                struct buffer_head *bh, ext3_fsblk_t block_to_free,
2041                unsigned long count, __le32 *first, __le32 *last)
2042{
2043        __le32 *p;
2044        if (try_to_extend_transaction(handle, inode)) {
2045                if (bh) {
2046                        BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
2047                        ext3_journal_dirty_metadata(handle, bh);
2048                }
2049                ext3_mark_inode_dirty(handle, inode);
2050                ext3_journal_test_restart(handle, inode);
2051                if (bh) {
2052                        BUFFER_TRACE(bh, "retaking write access");
2053                        ext3_journal_get_write_access(handle, bh);
2054                }
2055        }
2056
2057        /*
2058         * Any buffers which are on the journal will be in memory. We find
2059         * them on the hash table so journal_revoke() will run journal_forget()
2060         * on them.  We've already detached each block from the file, so
2061         * bforget() in journal_forget() should be safe.
2062         *
2063         * AKPM: turn on bforget in journal_forget()!!!
2064         */
2065        for (p = first; p < last; p++) {
2066                u32 nr = le32_to_cpu(*p);
2067                if (nr) {
2068                        struct buffer_head *bh;
2069
2070                        *p = 0;
2071                        bh = sb_find_get_block(inode->i_sb, nr);
2072                        ext3_forget(handle, 0, inode, bh, nr);
2073                }
2074        }
2075
2076        ext3_free_blocks(handle, inode, block_to_free, count);
2077}
2078
2079/**
2080 * ext3_free_data - free a list of data blocks
2081 * @handle:        handle for this transaction
2082 * @inode:        inode we are dealing with
2083 * @this_bh:        indirect buffer_head which contains *@first and *@last
2084 * @first:        array of block numbers
2085 * @last:        points immediately past the end of array
2086 *
2087 * We are freeing all blocks referenced from that array (numbers are stored as
2088 * little-endian 32-bit) and updating @inode->i_blocks appropriately.
2089 *
2090 * We accumulate contiguous runs of blocks to free.  Conveniently, if these
2091 * blocks are contiguous then releasing them at one time will only affect one
2092 * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
2093 * actually use a lot of journal space.
2094 *
2095 * @this_bh will be %NULL if @first and @last point into the inode's direct
2096 * block pointers.
2097 */
2098static void ext3_free_data(handle_t *handle, struct inode *inode,
2099                           struct buffer_head *this_bh,
2100                           __le32 *first, __le32 *last)
2101{
2102        ext3_fsblk_t block_to_free = 0;    /* Starting block # of a run */
2103        unsigned long count = 0;            /* Number of blocks in the run */
2104        __le32 *block_to_free_p = NULL;            /* Pointer into inode/ind
2105                                               corresponding to
2106                                               block_to_free */
2107        ext3_fsblk_t nr;                    /* Current block # */
2108        __le32 *p;                            /* Pointer into inode/ind
2109                                               for current block */
2110        int err;
2111
2112        if (this_bh) {                                /* For indirect block */
2113                BUFFER_TRACE(this_bh, "get_write_access");
2114                err = ext3_journal_get_write_access(handle, this_bh);
2115                /* Important: if we can't update the indirect pointers
2116                 * to the blocks, we can't free them. */
2117                if (err)
2118                        return;
2119        }
2120
2121        for (p = first; p < last; p++) {
2122                nr = le32_to_cpu(*p);
2123                if (nr) {
2124                        /* accumulate blocks to free if they're contiguous */
2125                        if (count == 0) {
2126                                block_to_free = nr;
2127                                block_to_free_p = p;
2128                                count = 1;
2129                        } else if (nr == block_to_free + count) {
2130                                count++;
2131                        } else {
2132                                ext3_clear_blocks(handle, inode, this_bh,
2133                                                  block_to_free,
2134                                                  count, block_to_free_p, p);
2135                                block_to_free = nr;
2136                                block_to_free_p = p;
2137                                count = 1;
2138                        }
2139                }
2140        }
2141
2142        if (count > 0)
2143                ext3_clear_blocks(handle, inode, this_bh, block_to_free,
2144                                  count, block_to_free_p, p);
2145
2146        if (this_bh) {
2147                BUFFER_TRACE(this_bh, "call ext3_journal_dirty_metadata");
2148
2149                /*
2150                 * The buffer head should have an attached journal head at this
2151                 * point. However, if the data is corrupted and an indirect
2152                 * block pointed to itself, it would have been detached when
2153                 * the block was cleared. Check for this instead of OOPSing.
2154                 */
2155                if (bh2jh(this_bh))
2156                        ext3_journal_dirty_metadata(handle, this_bh);
2157                else
2158                        ext3_error(inode->i_sb, "ext3_free_data",
2159                                   "circular indirect block detected, "
2160                                   "inode=%lu, block=%llu",
2161                                   inode->i_ino,
2162                                   (unsigned long long)this_bh->b_blocknr);
2163        }
2164}
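/*
 * [Editorial sketch, not part of the kernel source] The run accumulation
 * in ext3_free_data() above, boiled down: contiguous block numbers are
 * batched so the bitmap is touched once per extent rather than once per
 * block, and zero entries (holes) are simply skipped:
 */
#include <stdio.h>

int main(void)
{
	unsigned blocks[] = { 100, 101, 102, 0, 200, 201 };	/* 0 == hole */
	unsigned long start = 0, count = 0;
	unsigned i, nr;

	for (i = 0; i < sizeof(blocks) / sizeof(blocks[0]); i++) {
		nr = blocks[i];
		if (!nr)
			continue;
		if (count && nr == start + count) {
			count++;		/* extend the current run */
		} else {
			if (count)		/* flush the previous run */
				printf("free %lu..%lu\n", start, start + count - 1);
			start = nr;
			count = 1;
		}
	}
	if (count)
		printf("free %lu..%lu\n", start, start + count - 1);
	return 0;
}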
2165
2166/**
2167 *        ext3_free_branches - free an array of branches
2168 *        @handle: JBD handle for this transaction
2169 *        @inode:        inode we are dealing with
2170 *        @parent_bh: the buffer_head which contains *@first and *@last
2171 *        @first:        array of block numbers
2172 *        @last:        pointer immediately past the end of array
2173 *        @depth:        depth of the branches to free
2174 *
2175 *        We are freeing all blocks referenced from these branches (numbers are
2176 *        stored as little-endian 32-bit) and updating @inode->i_blocks
2177 *        appropriately.
2178 */
2179static void ext3_free_branches(handle_t *handle, struct inode *inode,
2180                               struct buffer_head *parent_bh,
2181                               __le32 *first, __le32 *last, int depth)
2182{
2183        ext3_fsblk_t nr;
2184        __le32 *p;
2185
2186        if (is_handle_aborted(handle))
2187                return;
2188
2189        if (depth--) {
2190                struct buffer_head *bh;
2191                int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb);
2192                p = last;
2193                while (--p >= first) {
2194                        nr = le32_to_cpu(*p);
2195                        if (!nr)
2196                                continue;                /* A hole */
2197
2198                        /* Go read the buffer for the next level down */
2199                        bh = sb_bread(inode->i_sb, nr);
2200
2201                        /*
2202                         * A read failure? Report error and clear slot
2203                         * (should be rare).
2204                         */
2205                        if (!bh) {
2206                                ext3_error(inode->i_sb, "ext3_free_branches",
2207                                           "Read failure, inode=%lu, block="E3FSBLK,
2208                                           inode->i_ino, nr);
2209                                continue;
2210                        }
2211
2212                        /* This zaps the entire block.  Bottom up. */
2213                        BUFFER_TRACE(bh, "free child branches");
2214                        ext3_free_branches(handle, inode, bh,
2215                                           (__le32*)bh->b_data,
2216                                           (__le32*)bh->b_data + addr_per_block,
2217                                           depth);
2218
2219                        /*
2220                         * We've probably journalled the indirect block several
2221                         * times during the truncate.  But it's no longer
2222                         * needed and we now drop it from the transaction via
2223                         * journal_revoke().
2224                         *
2225                         * That's easy if it's exclusively part of this
2226                         * transaction.  But if it's part of the committing
2227                         * transaction then journal_forget() will simply
2228                         * brelse() it.  That means that if the underlying
2229                         * block is reallocated in ext3_get_block(),
2230                         * unmap_underlying_metadata() will find this block
2231                         * and will try to get rid of it.  damn, damn.
2232                         *
2233                         * If this block has already been committed to the
2234                         * journal, a revoke record will be written.  And
2235                         * revoke records must be emitted *before* clearing
2236                         * this block's bit in the bitmaps.
2237                         */
2238                        ext3_forget(handle, 1, inode, bh, bh->b_blocknr);
2239
2240                        /*
2241                         * Everything below this pointer has been
2242                         * released.  Now let this top-of-subtree go.
2243                         *
2244                         * We want the freeing of this indirect block to be
2245                         * atomic in the journal with the updating of the
2246                         * bitmap block which owns it.  So make some room in
2247                         * the journal.
2248                         *
2249                         * We zero the parent pointer *after* freeing its
2250                         * pointee in the bitmaps, so if extend_transaction()
2251                         * for some reason fails to put the bitmap changes and
2252                         * the release into the same transaction, recovery
2253                         * will merely complain about releasing a free block,
2254                         * rather than leaking blocks.
2255                         */
2256                        if (is_handle_aborted(handle))
2257                                return;
2258                        if (try_to_extend_transaction(handle, inode)) {
2259                                ext3_mark_inode_dirty(handle, inode);
2260                                ext3_journal_test_restart(handle, inode);
2261                        }
2262
2263                        ext3_free_blocks(handle, inode, nr, 1);
2264
2265                        if (parent_bh) {
2266                                /*
2267                                 * The block which we have just freed is
2268                                 * pointed to by an indirect block: journal it
2269                                 */
2270                                BUFFER_TRACE(parent_bh, "get_write_access");
2271                                if (!ext3_journal_get_write_access(handle,
2272                                                                   parent_bh)){
2273                                        *p = 0;
2274                                        BUFFER_TRACE(parent_bh,
2275                                        "call ext3_journal_dirty_metadata");
2276                                        ext3_journal_dirty_metadata(handle,
2277                                                                    parent_bh);
2278                                }
2279                        }
2280                }
2281        } else {
2282                /* We have reached the bottom of the tree. */
2283                BUFFER_TRACE(parent_bh, "free data blocks");
2284                ext3_free_data(handle, inode, parent_bh, first, last);
2285        }
2286}
2287
2288int ext3_can_truncate(struct inode *inode)
2289{
2290        if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
2291                return 0;
2292        if (S_ISREG(inode->i_mode))
2293                return 1;
2294        if (S_ISDIR(inode->i_mode))
2295                return 1;
2296        if (S_ISLNK(inode->i_mode))
2297                return !ext3_inode_is_fast_symlink(inode);
2298        return 0;
2299}
2300
2301/*
2302 * ext3_truncate()
2303 *
2304 * We block out ext3_get_block() block instantiations across the entire
2305 * transaction, and VFS/VM ensures that ext3_truncate() cannot run
2306 * simultaneously on behalf of the same inode.
2307 *
2308 * As we work through the truncate and commit bits of it to the journal there
2309 * is one core, guiding principle: the file's tree must always be consistent on
2310 * disk.  We must be able to restart the truncate after a crash.
2311 *
2312 * The file's tree may be transiently inconsistent in memory (although it
2313 * probably isn't), but whenever we close off and commit a journal transaction,
2314 * the contents of (the filesystem + the journal) must be consistent and
2315 * restartable.  It's pretty simple, really: bottom up, right to left (although
2316 * left-to-right works OK too).
2317 *
2318 * Note that at recovery time, journal replay occurs *before* the restart of
2319 * truncate against the orphan inode list.
2320 *
2321 * The committed inode has the new, desired i_size (which is the same as
2322 * i_disksize in this case).  After a crash, ext3_orphan_cleanup() will see
2323 * that this inode's truncate did not complete and it will again call
2324 * ext3_truncate() to have another go.  So there will be instantiated blocks
2325 * to the right of the truncation point in a crashed ext3 filesystem.  But
2326 * that's fine - as long as they are linked from the inode, the post-crash
2327 * ext3_truncate() run will find them and release them.
2328 */
2329void ext3_truncate(struct inode *inode)
2330{
2331        handle_t *handle;
2332        struct ext3_inode_info *ei = EXT3_I(inode);
2333        __le32 *i_data = ei->i_data;
2334        int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb);
2335        struct address_space *mapping = inode->i_mapping;
2336        int offsets[4];
2337        Indirect chain[4];
2338        Indirect *partial;
2339        __le32 nr = 0;
2340        int n;
2341        long last_block;
2342        unsigned blocksize = inode->i_sb->s_blocksize;
2343        struct page *page;
2344
2345        if (!ext3_can_truncate(inode))
2346                return;
2347
2348        /*
2349         * We have to lock the EOF page here, because lock_page() nests
2350         * outside journal_start().
2351         */
2352        if ((inode->i_size & (blocksize - 1)) == 0) {
2353                /* Block boundary? Nothing to do */
2354                page = NULL;
2355        } else {
2356                page = grab_cache_page(mapping,
2357                                inode->i_size >> PAGE_CACHE_SHIFT);
2358                if (!page)
2359                        return;
2360        }
2361
2362        handle = start_transaction(inode);
2363        if (IS_ERR(handle)) {
2364                if (page) {
2365                        clear_highpage(page);
2366                        flush_dcache_page(page);
2367                        unlock_page(page);
2368                        page_cache_release(page);
2369                }
2370                return;                /* AKPM: return what? */
2371        }
2372
2373        last_block = (inode->i_size + blocksize-1)
2374                                        >> EXT3_BLOCK_SIZE_BITS(inode->i_sb);
2375
2376        if (page)
2377                ext3_block_truncate_page(handle, page, mapping, inode->i_size);
2378
2379        n = ext3_block_to_path(inode, last_block, offsets, NULL);
2380        if (n == 0)
2381                goto out_stop;        /* error */
2382
2383        /*
2384         * OK.  This truncate is going to happen.  We add the inode to the
2385         * orphan list, so that if this truncate spans multiple transactions,
2386         * and we crash, we will resume the truncate when the filesystem
2387         * recovers.  It also marks the inode dirty, to catch the new size.
2388         *
2389         * Implication: the file must always be in a sane, consistent
2390         * truncatable state while each transaction commits.
2391         */
2392        if (ext3_orphan_add(handle, inode))
2393                goto out_stop;
2394
2395        /*
2396         * The orphan list entry will now protect us from any crash which
2397         * occurs before the truncate completes, so it is now safe to propagate
2398         * the new, shorter inode size (held for now in i_size) into the
2399         * on-disk inode. We do this via i_disksize, which is the value which
2400         * ext3 *really* writes onto the disk inode.
2401         */
2402        ei->i_disksize = inode->i_size;
2403
2404        /*
2405         * From here we block out all ext3_get_block() callers who want to
2406         * modify the block allocation tree.
2407         */
2408        mutex_lock(&ei->truncate_mutex);
2409
2410        if (n == 1) {                /* direct blocks */
2411                ext3_free_data(handle, inode, NULL, i_data+offsets[0],
2412                               i_data + EXT3_NDIR_BLOCKS);
2413                goto do_indirects;
2414        }
2415
2416        partial = ext3_find_shared(inode, n, offsets, chain, &nr);
2417        /* Kill the top of shared branch (not detached) */
2418        if (nr) {
2419                if (partial == chain) {
2420                        /* Shared branch grows from the inode */
2421                        ext3_free_branches(handle, inode, NULL,
2422                                           &nr, &nr+1, (chain+n-1) - partial);
2423                        *partial->p = 0;
2424                        /*
2425                         * We mark the inode dirty prior to restart,
2426                         * and prior to stop.  No need for it here.
2427                         */
2428                } else {
2429                        /* Shared branch grows from an indirect block */
2430                        BUFFER_TRACE(partial->bh, "get_write_access");
2431                        ext3_free_branches(handle, inode, partial->bh,
2432                                        partial->p,
2433                                        partial->p+1, (chain+n-1) - partial);
2434                }
2435        }
2436        /* Clear the ends of indirect blocks on the shared branch */
2437        while (partial > chain) {
2438                ext3_free_branches(handle, inode, partial->bh, partial->p + 1,
2439                                   (__le32*)partial->bh->b_data+addr_per_block,
2440                                   (chain+n-1) - partial);
2441                BUFFER_TRACE(partial->bh, "call brelse");
2442                brelse(partial->bh);
2443                partial--;
2444        }
2445do_indirects:
2446        /* Kill the remaining (whole) subtrees */
2447        switch (offsets[0]) {
2448        default:
2449                nr = i_data[EXT3_IND_BLOCK];
2450                if (nr) {
2451                        ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
2452                        i_data[EXT3_IND_BLOCK] = 0;
2453                }
2454        case EXT3_IND_BLOCK:
2455                nr = i_data[EXT3_DIND_BLOCK];
2456                if (nr) {
2457                        ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
2458                        i_data[EXT3_DIND_BLOCK] = 0;
2459                }
2460        case EXT3_DIND_BLOCK:
2461                nr = i_data[EXT3_TIND_BLOCK];
2462                if (nr) {
2463                        ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
2464                        i_data[EXT3_TIND_BLOCK] = 0;
2465                }
2466        case EXT3_TIND_BLOCK:
2467                ;
2468        }
2469
2470        ext3_discard_reservation(inode);
2471
2472        mutex_unlock(&ei->truncate_mutex);
2473        inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
2474        ext3_mark_inode_dirty(handle, inode);
2475
2476        /*
2477         * In a multi-transaction truncate, we only make the final transaction
2478         * synchronous
2479         */
2480        if (IS_SYNC(inode))
2481                handle->h_sync = 1;
2482out_stop:
2483        /*
2484         * If this was a simple ftruncate(), and the file will remain alive
2485         * then we need to clear up the orphan record which we created above.
2486         * However, if this was a real unlink then we were called by
2487         * ext3_delete_inode(), and we allow that function to clean up the
2488         * orphan info for us.
2489         */
2490        if (inode->i_nlink)
2491                ext3_orphan_del(handle, inode);
2492
2493        ext3_journal_stop(handle);
2494}
2495
2496static ext3_fsblk_t ext3_get_inode_block(struct super_block *sb,
2497                unsigned long ino, struct ext3_iloc *iloc)
2498{
2499        unsigned long block_group;
2500        unsigned long offset;
2501        ext3_fsblk_t block;
2502        struct ext3_group_desc *gdp;
2503
2504        if (!ext3_valid_inum(sb, ino)) {
2505                /*
2506                 * This error is already checked for in namei.c unless we are
2507                 * looking at an NFS filehandle, in which case no error
2508                 * report is needed
2509                 */
2510                return 0;
2511        }
2512
2513        block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb);
2514        gdp = ext3_get_group_desc(sb, block_group, NULL);
2515        if (!gdp)
2516                return 0;
2517        /*
2518         * Figure out the offset within the block group inode table
2519         */
2520        offset = ((ino - 1) % EXT3_INODES_PER_GROUP(sb)) *
2521                EXT3_INODE_SIZE(sb);
2522        block = le32_to_cpu(gdp->bg_inode_table) +
2523                (offset >> EXT3_BLOCK_SIZE_BITS(sb));
2524
2525        iloc->block_group = block_group;
2526        iloc->offset = offset & (EXT3_BLOCK_SIZE(sb) - 1);
2527        return block;
2528}
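/*
 * [Editorial sketch, not part of the kernel source] The arithmetic in
 * ext3_get_inode_block(), worked through with made-up geometry: 1024-byte
 * blocks, 128-byte inodes, 2048 inodes per group, and a group descriptor
 * whose inode table starts at block 500.  Locating inode 5000:
 */
#include <stdio.h>

int main(void)
{
	unsigned long ino = 5000, inodes_per_group = 2048;
	unsigned long inode_size = 128, block_size = 1024;
	unsigned long inode_table = 500;	/* from the group descriptor */

	unsigned long group  = (ino - 1) / inodes_per_group;		/* 2 */
	unsigned long offset = ((ino - 1) % inodes_per_group) * inode_size;
	unsigned long block  = inode_table + (offset / block_size);	/* 612 */

	printf("group %lu, block %lu, offset in block %lu\n",
	       group, block, offset % block_size);			/* ... 896 */
	return 0;
}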
2529
2530/*
2531 * ext3_get_inode_loc returns with an extra refcount against the inode's
2532 * underlying buffer_head on success. If 'in_mem' is true, we have all
2533 * data in memory that is needed to recreate the on-disk version of this
2534 * inode.
2535 */
2536static int __ext3_get_inode_loc(struct inode *inode,
2537                                struct ext3_iloc *iloc, int in_mem)
2538{
2539        ext3_fsblk_t block;
2540        struct buffer_head *bh;
2541
2542        block = ext3_get_inode_block(inode->i_sb, inode->i_ino, iloc);
2543        if (!block)
2544                return -EIO;
2545
2546        bh = sb_getblk(inode->i_sb, block);
2547        if (!bh) {
2548                ext3_error (inode->i_sb, "ext3_get_inode_loc",
2549                                "unable to read inode block - "
2550                                "inode=%lu, block="E3FSBLK,
2551                                 inode->i_ino, block);
2552                return -EIO;
2553        }
2554        if (!buffer_uptodate(bh)) {
2555                lock_buffer(bh);
2556
2557                /*
2558                 * If the buffer has the write error flag, we have failed
2559                 * to write out another inode in the same block.  In this
2560                 * case, we don't have to read the block because we may
2561                 * read the old inode data successfully.
2562                 */
2563                if (buffer_write_io_error(bh) && !buffer_uptodate(bh))
2564                        set_buffer_uptodate(bh);
2565
2566                if (buffer_uptodate(bh)) {
2567                        /* someone brought it uptodate while we waited */
2568                        unlock_buffer(bh);
2569                        goto has_buffer;
2570                }
2571
2572                /*
2573                 * If we have all information of the inode in memory and this
2574                 * is the only valid inode in the block, we need not read the
2575                 * block.
2576                 */
2577                if (in_mem) {
2578                        struct buffer_head *bitmap_bh;
2579                        struct ext3_group_desc *desc;
2580                        int inodes_per_buffer;
2581                        int inode_offset, i;
2582                        int block_group;
2583                        int start;
2584
2585                        block_group = (inode->i_ino - 1) /
2586                                        EXT3_INODES_PER_GROUP(inode->i_sb);
2587                        inodes_per_buffer = bh->b_size /
2588                                EXT3_INODE_SIZE(inode->i_sb);
2589                        inode_offset = ((inode->i_ino - 1) %
2590                                        EXT3_INODES_PER_GROUP(inode->i_sb));
2591                        start = inode_offset & ~(inodes_per_buffer - 1);
2592
2593                        /* Is the inode bitmap in cache? */
2594                        desc = ext3_get_group_desc(inode->i_sb,
2595                                                block_group, NULL);
2596                        if (!desc)
2597                                goto make_io;
2598
2599                        bitmap_bh = sb_getblk(inode->i_sb,
2600                                        le32_to_cpu(desc->bg_inode_bitmap));
2601                        if (!bitmap_bh)
2602                                goto make_io;
2603
2604                        /*
2605                         * If the inode bitmap isn't in cache then the
2606                         * optimisation may end up performing two reads instead
2607                         * of one, so skip it.
2608                         */
2609                        if (!buffer_uptodate(bitmap_bh)) {
2610                                brelse(bitmap_bh);
2611                                goto make_io;
2612                        }
2613                        for (i = start; i < start + inodes_per_buffer; i++) {
2614                                if (i == inode_offset)
2615                                        continue;
2616                                if (ext3_test_bit(i, bitmap_bh->b_data))
2617                                        break;
2618                        }
2619                        brelse(bitmap_bh);
2620                        if (i == start + inodes_per_buffer) {
2621                                /* all other inodes are free, so skip I/O */
2622                                memset(bh->b_data, 0, bh->b_size);
2623                                set_buffer_uptodate(bh);
2624                                unlock_buffer(bh);
2625                                goto has_buffer;
2626                        }
2627                }
2628
2629make_io:
2630                /*
2631                 * There are other valid inodes in the buffer, this inode
2632                 * has in-inode xattrs, or we don't have this inode in memory.
2633                 * Read the block from disk.
2634                 */
2635                get_bh(bh);
2636                bh->b_end_io = end_buffer_read_sync;
2637                submit_bh(READ_META, bh);
2638                wait_on_buffer(bh);
2639                if (!buffer_uptodate(bh)) {
2640                        ext3_error(inode->i_sb, "ext3_get_inode_loc",
2641                                        "unable to read inode block - "
2642                                        "inode=%lu, block="E3FSBLK,
2643                                        inode->i_ino, block);
2644                        brelse(bh);
2645                        return -EIO;
2646                }
2647        }
2648has_buffer:
2649        iloc->bh = bh;
2650        return 0;
2651}
2652
2653int ext3_get_inode_loc(struct inode *inode, struct ext3_iloc *iloc)
2654{
2655        /* We have all inode data except xattrs in memory here. */
2656        return __ext3_get_inode_loc(inode, iloc,
2657                !(EXT3_I(inode)->i_state & EXT3_STATE_XATTR));
2658}
2659
2660void ext3_set_inode_flags(struct inode *inode)
2661{
2662        unsigned int flags = EXT3_I(inode)->i_flags;
2663
2664        inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
2665        if (flags & EXT3_SYNC_FL)
2666                inode->i_flags |= S_SYNC;
2667        if (flags & EXT3_APPEND_FL)
2668                inode->i_flags |= S_APPEND;
2669        if (flags & EXT3_IMMUTABLE_FL)
2670                inode->i_flags |= S_IMMUTABLE;
2671        if (flags & EXT3_NOATIME_FL)
2672                inode->i_flags |= S_NOATIME;
2673        if (flags & EXT3_DIRSYNC_FL)
2674                inode->i_flags |= S_DIRSYNC;
2675}
2676
2677/* Propagate flags from i_flags to EXT3_I(inode)->i_flags */
2678void ext3_get_inode_flags(struct ext3_inode_info *ei)
2679{
2680        unsigned int flags = ei->vfs_inode.i_flags;
2681
2682        ei->i_flags &= ~(EXT3_SYNC_FL|EXT3_APPEND_FL|
2683                        EXT3_IMMUTABLE_FL|EXT3_NOATIME_FL|EXT3_DIRSYNC_FL);
2684        if (flags & S_SYNC)
2685                ei->i_flags |= EXT3_SYNC_FL;
2686        if (flags & S_APPEND)
2687                ei->i_flags |= EXT3_APPEND_FL;
2688        if (flags & S_IMMUTABLE)
2689                ei->i_flags |= EXT3_IMMUTABLE_FL;
2690        if (flags & S_NOATIME)
2691                ei->i_flags |= EXT3_NOATIME_FL;
2692        if (flags & S_DIRSYNC)
2693                ei->i_flags |= EXT3_DIRSYNC_FL;
2694}
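/*
 * Editorial note (hedged): the two helpers above are mirror images.
 * ext3_set_inode_flags() pushes the on-disk EXT3_*_FL bits into the
 * generic VFS i_flags at read-in time, and ext3_get_inode_flags()
 * pulls them back before the inode is written out.  For example, an
 * inode carrying EXT3_APPEND_FL surfaces to the VFS as S_APPEND, so
 * generic code can enforce append-only semantics without knowing
 * anything about ext3's on-disk flag layout.
 */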
2695
2696struct inode *ext3_iget(struct super_block *sb, unsigned long ino)
2697{
2698        struct ext3_iloc iloc;
2699        struct ext3_inode *raw_inode;
2700        struct ext3_inode_info *ei;
2701        struct buffer_head *bh;
2702        struct inode *inode;
2703        long ret;
2704        int block;
2705
2706        inode = iget_locked(sb, ino);
2707        if (!inode)
2708                return ERR_PTR(-ENOMEM);
2709        if (!(inode->i_state & I_NEW))
2710                return inode;
2711
2712        ei = EXT3_I(inode);
2713#ifdef CONFIG_EXT3_FS_POSIX_ACL
2714        ei->i_acl = EXT3_ACL_NOT_CACHED;
2715        ei->i_default_acl = EXT3_ACL_NOT_CACHED;
2716#endif
2717        ei->i_block_alloc_info = NULL;
2718
2719        ret = __ext3_get_inode_loc(inode, &iloc, 0);
2720        if (ret < 0)
2721                goto bad_inode;
2722        bh = iloc.bh;
2723        raw_inode = ext3_raw_inode(&iloc);
2724        inode->i_mode = le16_to_cpu(raw_inode->i_mode);
2725        inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
2726        inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
2727        if(!(test_opt (inode->i_sb, NO_UID32))) {
2728                inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
2729                inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
2730        }
2731        inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
2732        inode->i_size = le32_to_cpu(raw_inode->i_size);
2733        inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime);
2734        inode->i_ctime.tv_sec = (signed)le32_to_cpu(raw_inode->i_ctime);
2735        inode->i_mtime.tv_sec = (signed)le32_to_cpu(raw_inode->i_mtime);
2736        inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec = inode->i_mtime.tv_nsec = 0;
2737
2738        ei->i_state = 0;
2739        ei->i_dir_start_lookup = 0;
2740        ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
2741        /* We now have enough fields to check if the inode was active or not.
2742         * This is needed because nfsd might try to access dead inodes;
2743         * the test is the same one that e2fsck uses.
2744         * NeilBrown 1999oct15
2745         */
2746        if (inode->i_nlink == 0) {
2747                if (inode->i_mode == 0 ||
2748                    !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ORPHAN_FS)) {
2749                        /* this inode is deleted */
2750                        brelse (bh);
2751                        ret = -ESTALE;
2752                        goto bad_inode;
2753                }
2754                /* The only unlinked inodes we let through here have
2755                 * valid i_mode and are being read by the orphan
2756                 * recovery code: that's fine, we're about to complete
2757                 * the process of deleting those. */
2758        }
2759        inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
2760        ei->i_flags = le32_to_cpu(raw_inode->i_flags);
2761#ifdef EXT3_FRAGMENTS
2762        ei->i_faddr = le32_to_cpu(raw_inode->i_faddr);
2763        ei->i_frag_no = raw_inode->i_frag;
2764        ei->i_frag_size = raw_inode->i_fsize;
2765#endif
2766        ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
2767        if (!S_ISREG(inode->i_mode)) {
2768                ei->i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl);
2769        } else {
2770                inode->i_size |=
2771                        ((__u64)le32_to_cpu(raw_inode->i_size_high)) << 32;
2772        }
2773        ei->i_disksize = inode->i_size;
2774        inode->i_generation = le32_to_cpu(raw_inode->i_generation);
2775        ei->i_block_group = iloc.block_group;
2776        /*
2777         * NOTE! The in-memory inode i_data array is in little-endian order
2778         * even on big-endian machines: we do NOT byteswap the block numbers!
2779         */
2780        for (block = 0; block < EXT3_N_BLOCKS; block++)
2781                ei->i_data[block] = raw_inode->i_block[block];
2782        INIT_LIST_HEAD(&ei->i_orphan);
2783
2784        if (inode->i_ino >= EXT3_FIRST_INO(inode->i_sb) + 1 &&
2785            EXT3_INODE_SIZE(inode->i_sb) > EXT3_GOOD_OLD_INODE_SIZE) {
2786                /*
2787                 * When mke2fs creates big inodes it does not zero out
2788                 * the unused bytes above EXT3_GOOD_OLD_INODE_SIZE,
2789                 * so ignore those first few inodes.
2790                 */
2791                ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
2792                if (EXT3_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
2793                    EXT3_INODE_SIZE(inode->i_sb)) {
2794                        brelse (bh);
2795                        ret = -EIO;
2796                        goto bad_inode;
2797                }
2798                if (ei->i_extra_isize == 0) {
2799                        /* The extra space is currently unused. Use it. */
2800                        ei->i_extra_isize = sizeof(struct ext3_inode) -
2801                                            EXT3_GOOD_OLD_INODE_SIZE;
2802                } else {
2803                        __le32 *magic = (void *)raw_inode +
2804                                        EXT3_GOOD_OLD_INODE_SIZE +
2805                                        ei->i_extra_isize;
2806                        if (*magic == cpu_to_le32(EXT3_XATTR_MAGIC))
2807                                 ei->i_state |= EXT3_STATE_XATTR;
2808                }
2809        } else
2810                ei->i_extra_isize = 0;
2811
2812        if (S_ISREG(inode->i_mode)) {
2813                inode->i_op = &ext3_file_inode_operations;
2814                inode->i_fop = &ext3_file_operations;
2815                ext3_set_aops(inode);
2816        } else if (S_ISDIR(inode->i_mode)) {
2817                inode->i_op = &ext3_dir_inode_operations;
2818                inode->i_fop = &ext3_dir_operations;
2819        } else if (S_ISLNK(inode->i_mode)) {
2820                if (ext3_inode_is_fast_symlink(inode))
2821                        inode->i_op = &ext3_fast_symlink_inode_operations;
2822                else {
2823                        inode->i_op = &ext3_symlink_inode_operations;
2824                        ext3_set_aops(inode);
2825                }
2826        } else {
2827                inode->i_op = &ext3_special_inode_operations;
2828                if (raw_inode->i_block[0])
2829                        init_special_inode(inode, inode->i_mode,
2830                           old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
2831                else
2832                        init_special_inode(inode, inode->i_mode,
2833                           new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
2834        }
2835        brelse (iloc.bh);
2836        ext3_set_inode_flags(inode);
2837        unlock_new_inode(inode);
2838        return inode;
2839
2840bad_inode:
2841        iget_failed(inode);
2842        return ERR_PTR(ret);
2843}
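/*
 * Minimal caller sketch (hedged, assumed context): ext3_iget()
 * reports failure through ERR_PTR() rather than NULL, so callers
 * must test the result with IS_ERR():
 *
 *	inode = ext3_iget(sb, ino);
 *	if (IS_ERR(inode))
 *		return PTR_ERR(inode);
 *	// ... use the inode ...
 *	iput(inode);
 */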
2844
2845/*
2846 * Post the struct inode info into an on-disk inode location in the
2847 * buffer-cache.  This gobbles the caller's reference to the
2848 * buffer_head in the inode location struct.
2849 *
2850 * The caller must have write access to iloc->bh.
2851 */
2852static int ext3_do_update_inode(handle_t *handle,
2853                                struct inode *inode,
2854                                struct ext3_iloc *iloc)
2855{
2856        struct ext3_inode *raw_inode = ext3_raw_inode(iloc);
2857        struct ext3_inode_info *ei = EXT3_I(inode);
2858        struct buffer_head *bh = iloc->bh;
2859        int err = 0, rc, block;
2860
2861        /* For fields not tracked in the in-memory inode,
2862         * initialise them to zero for new inodes. */
2863        if (ei->i_state & EXT3_STATE_NEW)
2864                memset(raw_inode, 0, EXT3_SB(inode->i_sb)->s_inode_size);
2865
2866        ext3_get_inode_flags(ei);
2867        raw_inode->i_mode = cpu_to_le16(inode->i_mode);
2868        if(!(test_opt(inode->i_sb, NO_UID32))) {
2869                raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
2870                raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid));
2871/*
2872 * Fix up interoperability with old kernels. Otherwise, old inodes get
2873 * re-used with the upper 16 bits of the uid/gid intact
2874 */
2875                if(!ei->i_dtime) {
2876                        raw_inode->i_uid_high =
2877                                cpu_to_le16(high_16_bits(inode->i_uid));
2878                        raw_inode->i_gid_high =
2879                                cpu_to_le16(high_16_bits(inode->i_gid));
2880                } else {
2881                        raw_inode->i_uid_high = 0;
2882                        raw_inode->i_gid_high = 0;
2883                }
2884        } else {
2885                raw_inode->i_uid_low =
2886                        cpu_to_le16(fs_high2lowuid(inode->i_uid));
2887                raw_inode->i_gid_low =
2888                        cpu_to_le16(fs_high2lowgid(inode->i_gid));
2889                raw_inode->i_uid_high = 0;
2890                raw_inode->i_gid_high = 0;
2891        }
2892        raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
2893        raw_inode->i_size = cpu_to_le32(ei->i_disksize);
2894        raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
2895        raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
2896        raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
2897        raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
2898        raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
2899        raw_inode->i_flags = cpu_to_le32(ei->i_flags);
2900#ifdef EXT3_FRAGMENTS
2901        raw_inode->i_faddr = cpu_to_le32(ei->i_faddr);
2902        raw_inode->i_frag = ei->i_frag_no;
2903        raw_inode->i_fsize = ei->i_frag_size;
2904#endif
2905        raw_inode->i_file_acl = cpu_to_le32(ei->i_file_acl);
2906        if (!S_ISREG(inode->i_mode)) {
2907                raw_inode->i_dir_acl = cpu_to_le32(ei->i_dir_acl);
2908        } else {
2909                raw_inode->i_size_high =
2910                        cpu_to_le32(ei->i_disksize >> 32);
2911                if (ei->i_disksize > 0x7fffffffULL) {
2912                        struct super_block *sb = inode->i_sb;
2913                        if (!EXT3_HAS_RO_COMPAT_FEATURE(sb,
2914                                        EXT3_FEATURE_RO_COMPAT_LARGE_FILE) ||
2915                            EXT3_SB(sb)->s_es->s_rev_level ==
2916                                        cpu_to_le32(EXT3_GOOD_OLD_REV)) {
2917                               /* If this is the first large file
2918                                * created, add a flag to the superblock.
2919                                */
2920                                err = ext3_journal_get_write_access(handle,
2921                                                EXT3_SB(sb)->s_sbh);
2922                                if (err)
2923                                        goto out_brelse;
2924                                ext3_update_dynamic_rev(sb);
2925                                EXT3_SET_RO_COMPAT_FEATURE(sb,
2926                                        EXT3_FEATURE_RO_COMPAT_LARGE_FILE);
2927                                sb->s_dirt = 1;
2928                                handle->h_sync = 1;
2929                                err = ext3_journal_dirty_metadata(handle,
2930                                                EXT3_SB(sb)->s_sbh);
2931                        }
2932                }
2933        }
2934        raw_inode->i_generation = cpu_to_le32(inode->i_generation);
2935        if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
2936                if (old_valid_dev(inode->i_rdev)) {
2937                        raw_inode->i_block[0] =
2938                                cpu_to_le32(old_encode_dev(inode->i_rdev));
2939                        raw_inode->i_block[1] = 0;
2940                } else {
2941                        raw_inode->i_block[0] = 0;
2942                        raw_inode->i_block[1] =
2943                                cpu_to_le32(new_encode_dev(inode->i_rdev));
2944                        raw_inode->i_block[2] = 0;
2945                }
2946        } else for (block = 0; block < EXT3_N_BLOCKS; block++)
2947                raw_inode->i_block[block] = ei->i_data[block];
2948
2949        if (ei->i_extra_isize)
2950                raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
2951
2952        BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
2953        rc = ext3_journal_dirty_metadata(handle, bh);
2954        if (!err)
2955                err = rc;
2956        ei->i_state &= ~EXT3_STATE_NEW;
2957
2958out_brelse:
2959        brelse (bh);
2960        ext3_std_error(inode->i_sb, err);
2961        return err;
2962}
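/*
 * Editorial note (hedged): both the success path and the out_brelse
 * error path above drop one reference on iloc->bh, i.e. the function
 * consumes the caller's reference exactly as its header comment
 * advertises.  That is why ext3_mark_iloc_dirty() further below takes
 * an extra get_bh() before calling in here.
 */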
2963
2964/*
2965 * ext3_write_inode()
2966 *
2967 * We are called from a few places:
2968 *
2969 * - Within generic_file_write() for O_SYNC files.
2970 *   Here, there will be no transaction running. We wait for any running
2971 *   transaction to commit.
2972 *
2973 * - Within sys_sync(), kupdate and such.
2974 *   We wait on commit, if told to.
2975 *
2976 * - Within prune_icache() (PF_MEMALLOC == true)
2977 *   Here we simply return.  We can't afford to block kswapd on the
2978 *   journal commit.
2979 *
2980 * In all cases it is actually safe for us to return without doing anything,
2981 * because the inode has been copied into a raw inode buffer in
2982 * ext3_mark_inode_dirty().  This is a correctness thing for O_SYNC and for
2983 * knfsd.
2984 *
2985 * Note that we are absolutely dependent upon all inode dirtiers doing the
2986 * right thing: they *must* call mark_inode_dirty() after dirtying info in
2987 * which we are interested.
2988 *
2989 * It would be a bug for them to not do this.  The code:
2990 *
2991 *        mark_inode_dirty(inode)
2992 *        stuff();
2993 *        inode->i_size = expr;
2994 *
2995 * is in error because a kswapd-driven write_inode() could occur while
2996 * `stuff()' is running, and the new i_size will be lost.  Plus the inode
2997 * will no longer be on the superblock's dirty inode list.
2998 */
2999int ext3_write_inode(struct inode *inode, int wait)
3000{
3001        if (current->flags & PF_MEMALLOC)
3002                return 0;
3003
3004        if (ext3_journal_current_handle()) {
3005                jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
3006                dump_stack();
3007                return -EIO;
3008        }
3009
3010        if (!wait)
3011                return 0;
3012
3013        return ext3_force_commit(inode->i_sb);
3014}
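/*
 * Hedged illustration: for an fsync()-style caller the wait != 0 path
 * above amounts to a full journal commit,
 *
 *	err = ext3_write_inode(inode, 1);	// forces a journal commit
 *
 * while background writeback (wait == 0) may safely return early,
 * because ext3_mark_inode_dirty() has already copied the inode into
 * the journal, as the comment block above explains.
 */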
3015
3016/*
3017 * ext3_setattr()
3018 *
3019 * Called from notify_change.
3020 *
3021 * We want to trap VFS attempts to truncate the file as soon as
3022 * possible.  In particular, we want to make sure that when the VFS
3023 * shrinks i_size, we put the inode on the orphan list and modify
3024 * i_disksize immediately, so that during the subsequent flushing of
3025 * dirty pages and freeing of disk blocks, we can guarantee that any
3026 * commit will leave the blocks being flushed in an unused state on
3027 * disk.  (On recovery, the inode will get truncated and the blocks will
3028 * be freed, so we have a strong guarantee that no future commit will
3029 * leave these blocks visible to the user.)
3030 *
3031 * Called with inode->sem down.
3032 */
3033int ext3_setattr(struct dentry *dentry, struct iattr *attr)
3034{
3035        struct inode *inode = dentry->d_inode;
3036        int error, rc = 0;
3037        const unsigned int ia_valid = attr->ia_valid;
3038
3039        error = inode_change_ok(inode, attr);
3040        if (error)
3041                return error;
3042
3043        if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
3044                (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
3045                handle_t *handle;
3046
3047                /* (user+group)*(old+new) structure, inode write (sb,
3048                 * inode block, ? - but truncate inode update has it) */
3049                handle = ext3_journal_start(inode, 2*(EXT3_QUOTA_INIT_BLOCKS(inode->i_sb)+
3050                                        EXT3_QUOTA_DEL_BLOCKS(inode->i_sb))+3);
3051                if (IS_ERR(handle)) {
3052                        error = PTR_ERR(handle);
3053                        goto err_out;
3054                }
3055                error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0;
3056                if (error) {
3057                        ext3_journal_stop(handle);
3058                        return error;
3059                }
3060                /* Update corresponding info in inode so that everything is in
3061                 * one transaction */
3062                if (attr->ia_valid & ATTR_UID)
3063                        inode->i_uid = attr->ia_uid;
3064                if (attr->ia_valid & ATTR_GID)
3065                        inode->i_gid = attr->ia_gid;
3066                error = ext3_mark_inode_dirty(handle, inode);
3067                ext3_journal_stop(handle);
3068        }
3069
3070        if (S_ISREG(inode->i_mode) &&
3071            attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) {
3072                handle_t *handle;
3073
3074                handle = ext3_journal_start(inode, 3);
3075                if (IS_ERR(handle)) {
3076                        error = PTR_ERR(handle);
3077                        goto err_out;
3078                }
3079
3080                error = ext3_orphan_add(handle, inode);
3081                EXT3_I(inode)->i_disksize = attr->ia_size;
3082                rc = ext3_mark_inode_dirty(handle, inode);
3083                if (!error)
3084                        error = rc;
3085                ext3_journal_stop(handle);
3086        }
3087
3088        rc = inode_setattr(inode, attr);
3089
3090        /* If inode_setattr's call to ext3_truncate failed to get a
3091         * transaction handle at all, we need to clean up the in-core
3092         * orphan list manually. */
3093        if (inode->i_nlink)
3094                ext3_orphan_del(NULL, inode);
3095
3096        if (!rc && (ia_valid & ATTR_MODE))
3097                rc = ext3_acl_chmod(inode);
3098
3099err_out:
3100        ext3_std_error(inode->i_sb, error);
3101        if (!error)
3102                error = rc;
3103        return error;
3104}
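/*
 * Hedged summary of the shrinking-truncate path above; the ordering is
 * deliberate:
 *
 *	ext3_orphan_add(handle, inode);
 *	EXT3_I(inode)->i_disksize = attr->ia_size;
 *	ext3_mark_inode_dirty(handle, inode);
 *
 * If we crash before inode_setattr() finishes the truncate, orphan
 * recovery can complete the job, so no freed blocks remain visible to
 * the user, exactly as the header comment promises.
 */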
3105
3106
3107/*
3108 * How many blocks doth make a writepage()?
3109 *
3110 * With N blocks per page, it may be:
3111 * N data blocks
3112 * 2 indirect blocks
3113 * 2 dindirect blocks
3114 * 1 tindirect block
3115 * N+5 bitmap blocks (from the above)
3116 * N+5 group descriptor summary blocks
3117 * 1 inode block
3118 * 1 superblock.
3119 * 2 * EXT3_SINGLEDATA_TRANS_BLOCKS for the quota files
3120 *
3121 * 3 * (N + 5) + 2 + 2 * EXT3_SINGLEDATA_TRANS_BLOCKS
3122 *
3123 * With ordered or writeback data it's the same, less the N data blocks.
3124 *
3125 * If the inode's direct blocks can hold an integral number of pages then a
3126 * page cannot straddle two indirect blocks, and we can only touch one indirect
3127 * and dindirect block, and the "5" above becomes "3".
3128 *
3129 * This still overestimates under most circumstances.  If we were to pass the
3130 * start and end offsets in here as well we could do block_to_path() on each
3131 * block and work out the exact number of indirects which are touched.  Pah.
3132 */
3133
3134static int ext3_writepage_trans_blocks(struct inode *inode)
3135{
3136        int bpp = ext3_journal_blocks_per_page(inode);
3137        int indirects = (EXT3_NDIR_BLOCKS % bpp) ? 5 : 3;
3138        int ret;
3139
3140        if (ext3_should_journal_data(inode))
3141                ret = 3 * (bpp + indirects) + 2;
3142        else
3143                ret = 2 * (bpp + indirects) + 2;
3144
3145#ifdef CONFIG_QUOTA
3146        /* We know that the structure was already allocated during DQUOT_INIT,
3147         * so we will be updating only the data blocks + inodes */
3148        ret += 2*EXT3_QUOTA_TRANS_BLOCKS(inode->i_sb);
3149#endif
3150
3151        return ret;
3152}
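/*
 * Worked example (illustrative, assuming 4K blocks and 4K pages):
 * bpp == 1 and EXT3_NDIR_BLOCKS (12) % 1 == 0, so indirects == 3.
 * Data journaling then reserves 3 * (1 + 3) + 2 = 14 credits, and
 * ordered/writeback mode reserves 2 * (1 + 3) + 2 = 10, plus
 * 2 * EXT3_QUOTA_TRANS_BLOCKS when CONFIG_QUOTA is enabled.
 */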
3153
3154/*
3155 * The caller must have previously called ext3_reserve_inode_write().
3156 * Given this, we know that the caller already has write access to iloc->bh.
3157 */
3158int ext3_mark_iloc_dirty(handle_t *handle,
3159                struct inode *inode, struct ext3_iloc *iloc)
3160{
3161        int err = 0;
3162
3163        /* the do_update_inode consumes one bh->b_count */
3164        get_bh(iloc->bh);
3165
3166        /* ext3_do_update_inode() does journal_dirty_metadata */
3167        err = ext3_do_update_inode(handle, inode, iloc);
3168        put_bh(iloc->bh);
3169        return err;
3170}
3171
3172/*
3173 * On success, we end up with an outstanding reference count against
3174 * iloc->bh.  This _must_ be cleaned up later.
3175 */
3176
3177int
3178ext3_reserve_inode_write(handle_t *handle, struct inode *inode,
3179                         struct ext3_iloc *iloc)
3180{
3181        int err = 0;
3182        if (handle) {
3183                err = ext3_get_inode_loc(inode, iloc);
3184                if (!err) {
3185                        BUFFER_TRACE(iloc->bh, "get_write_access");
3186                        err = ext3_journal_get_write_access(handle, iloc->bh);
3187                        if (err) {
3188                                brelse(iloc->bh);
3189                                iloc->bh = NULL;
3190                        }
3191                }
3192        }
3193        ext3_std_error(inode->i_sb, err);
3194        return err;
3195}
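/*
 * Hedged pairing sketch (assumed caller, not from this file): the bh
 * reference taken on success here is handed to ext3_mark_iloc_dirty(),
 * which consumes it:
 *
 *	err = ext3_reserve_inode_write(handle, inode, &iloc);
 *	if (!err) {
 *		// ... update fields via ext3_raw_inode(&iloc) ...
 *		err = ext3_mark_iloc_dirty(handle, inode, &iloc);
 *	}
 */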
3196
3197/*
3198 * What we do here is to mark the in-core inode as clean with respect to inode
3199 * dirtiness (it may still be data-dirty).
3200 * This means that the in-core inode may be reaped by prune_icache
3201 * without having to perform any I/O.  This is a very good thing,
3202 * because *any* task may call prune_icache - even ones which
3203 * have a transaction open against a different journal.
3204 *
3205 * Is this cheating?  Not really.  Sure, we haven't written the
3206 * inode out, but prune_icache isn't a user-visible syncing function.
3207 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
3208 * we start and wait on commits.
3209 *
3210 * Is this efficient/effective?  Well, we're being nice to the system
3211 * by cleaning up our inodes proactively so they can be reaped
3212 * without I/O.  But we are potentially leaving up to five seconds'
3213 * worth of inodes floating about which prune_icache wants us to
3214 * write out.  One way to fix that would be to get prune_icache()
3215 * to do a write_super() to free up some memory.  It has the desired
3216 * effect.
3217 */
3218int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode)
3219{
3220        struct ext3_iloc iloc;
3221        int err;
3222
3223        might_sleep();
3224        err = ext3_reserve_inode_write(handle, inode, &iloc);
3225        if (!err)
3226                err = ext3_mark_iloc_dirty(handle, inode, &iloc);
3227        return err;
3228}
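/*
 * Editorial sketch (hedged): ext3_mark_inode_dirty() is the tail end
 * of the canonical dirtying pattern,
 *
 *	handle = ext3_journal_start(inode, credits);
 *	// ... modify the in-core inode ...
 *	err = ext3_mark_inode_dirty(handle, inode);
 *	ext3_journal_stop(handle);
 *
 * where "credits" stands for whatever block estimate the caller
 * computed.  ext3_dirty_inode() below is exactly this pattern, driven
 * from __mark_inode_dirty().
 */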
3229
3230/*
3231 * ext3_dirty_inode() is called from __mark_inode_dirty()
3232 *
3233 * We're really interested in the case where a file is being extended.
3234 * i_size has been changed by generic_commit_write() and we thus need
3235 * to include the updated inode in the current transaction.
3236 *
3237 * Also, DQUOT_ALLOC_SPACE() will always dirty the inode when blocks
3238 * are allocated to the file.
3239 *
3240 * If the inode is marked synchronous, we don't honour that here - doing
3241 * so would cause a commit on atime updates, which we don't bother doing.
3242 * We handle synchronous inodes at the highest possible level.
3243 */
3244void ext3_dirty_inode(struct inode *inode)
3245{
3246        handle_t *current_handle = ext3_journal_current_handle();
3247        handle_t *handle;
3248
3249        handle = ext3_journal_start(inode, 2);
3250        if (IS_ERR(handle))
3251                goto out;
3252        if (current_handle &&
3253                current_handle->h_transaction != handle->h_transaction) {
3254                /* This task has a transaction open against a different fs */
3255                printk(KERN_EMERG "%s: transactions do not match!\n",
3256                       __func__);
3257        } else {
3258                jbd_debug(5, "marking dirty.  outer handle=%p\n",
3259                                current_handle);
3260                ext3_mark_inode_dirty(handle, inode);
3261        }
3262        ext3_journal_stop(handle);
3263out:
3264        return;
3265}
3266
3267#if 0
3268/*
3269 * Bind an inode's backing buffer_head into this transaction, to prevent
3270 * it from being flushed to disk early.  Unlike
3271 * ext3_reserve_inode_write, this leaves behind no bh reference and
3272 * returns no iloc structure, so the caller needs to repeat the iloc
3273 * lookup to mark the inode dirty later.
3274 */
3275static int ext3_pin_inode(handle_t *handle, struct inode *inode)
3276{
3277        struct ext3_iloc iloc;
3278
3279        int err = 0;
3280        if (handle) {
3281                err = ext3_get_inode_loc(inode, &iloc);
3282                if (!err) {
3283                        BUFFER_TRACE(iloc.bh, "get_write_access");
3284                        err = journal_get_write_access(handle, iloc.bh);
3285                        if (!err)
3286                                err = ext3_journal_dirty_metadata(handle,
3287                                                                  iloc.bh);
3288                        brelse(iloc.bh);
3289                }
3290        }
3291        ext3_std_error(inode->i_sb, err);
3292        return err;
3293}
3294#endif
3295
3296int ext3_change_inode_journal_flag(struct inode *inode, int val)
3297{
3298        journal_t *journal;
3299        handle_t *handle;
3300        int err;
3301
3302        /*
3303         * We have to be very careful here: changing a data block's
3304         * journaling status dynamically is dangerous.  If we write a
3305         * data block to the journal, change the status and then delete
3306         * that block, we risk forgetting to revoke the old log record
3307         * from the journal and so a subsequent replay can corrupt data.
3308         * So, first we make sure that the journal is empty and that
3309         * nobody is changing anything.
3310         */
3311
3312        journal = EXT3_JOURNAL(inode);
3313        if (is_journal_aborted(journal))
3314                return -EROFS;
3315
3316        journal_lock_updates(journal);
3317        journal_flush(journal);
3318
3319        /*
3320         * OK, there are no updates running now, and all cached data is
3321         * synced to disk.  We are now in a completely consistent state
3322         * which doesn't have anything in the journal, and we know that
3323         * no filesystem updates are running, so it is safe to modify
3324         * the inode's in-core data-journaling state flag now.
3325         */
3326
3327        if (val)
3328                EXT3_I(inode)->i_flags |= EXT3_JOURNAL_DATA_FL;
3329        else
3330                EXT3_I(inode)->i_flags &= ~EXT3_JOURNAL_DATA_FL;
3331        ext3_set_aops(inode);
3332
3333        journal_unlock_updates(journal);
3334
3335        /* Finally we can mark the inode as dirty. */
3336
3337        handle = ext3_journal_start(inode, 1);
3338        if (IS_ERR(handle))
3339                return PTR_ERR(handle);
3340
3341        err = ext3_mark_inode_dirty(handle, inode);
3342        handle->h_sync = 1;
3343        ext3_journal_stop(handle);
3344        ext3_std_error(inode->i_sb, err);
3345
3346        return err;
3347}
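/*
 * Hedged usage note: this function backs the per-inode toggling of
 * data journaling (e.g. via the EXT3_JOURNAL_DATA_FL ioctl path, as
 * exposed by "chattr +j").  A sketch of an assumed caller:
 *
 *	err = ext3_change_inode_journal_flag(inode, 1);
 *	if (err)
 *		return err;
 *
 * The journal_flush() above guarantees that no stale journaled copies
 * of the inode's data blocks can survive the mode change and later be
 * replayed over live data.
 */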