Showing error 1327

User: Jiri Slaby
Error type: Leaving function in locked state
Error type description: A lock is acquired but not released on every path out of a function, so the lock is leaked (see the sketch below, before the source listing)
File location: fs/inode.c
Line in file: 1333
Project: Linux Kernel
Project version: 2.6.28
Tools: Stanse (1.2)
Entered: 2012-05-21 20:30:05 UTC
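
Sketch of the defect class:

The following is a minimal, hypothetical userspace illustration of this defect
class, written against POSIX pthreads rather than the kernel's spinlock/mutex
API; the names lookup_leaky, lookup_fixed, table_lock and TABLE_SIZE are made
up for the example and do not appear in the source below. The first function
returns early with the lock still held; the second releases it on every exit
path using the usual goto-unlock idiom.

/* Hypothetical example; not taken from fs/inode.c. */
#include <pthread.h>
#include <stddef.h>

#define TABLE_SIZE 16

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static int table[TABLE_SIZE];

/* BUGGY: the bounds-check path returns while table_lock is still held. */
static int lookup_leaky(size_t idx, int *out)
{
        pthread_mutex_lock(&table_lock);
        if (idx >= TABLE_SIZE)
                return -1;              /* lock leaked on this path */
        *out = table[idx];
        pthread_mutex_unlock(&table_lock);
        return 0;
}

/* FIXED: every path out of the function releases the lock. */
static int lookup_fixed(size_t idx, int *out)
{
        int ret = 0;

        pthread_mutex_lock(&table_lock);
        if (idx >= TABLE_SIZE) {
                ret = -1;
                goto out_unlock;
        }
        *out = table[idx];
out_unlock:
        pthread_mutex_unlock(&table_lock);
        return ret;
}

In the listing below, the flagged line 1333 is the early return in
inode_double_lock(), a helper whose purpose is to return with the acquired
i_mutex locks held; they are released by the caller through
inode_double_unlock().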


Source:

   1/*
   2 * linux/fs/inode.c
   3 *
   4 * (C) 1997 Linus Torvalds
   5 */
   6
   7#include <linux/fs.h>
   8#include <linux/mm.h>
   9#include <linux/dcache.h>
  10#include <linux/init.h>
  11#include <linux/quotaops.h>
  12#include <linux/slab.h>
  13#include <linux/writeback.h>
  14#include <linux/module.h>
  15#include <linux/backing-dev.h>
  16#include <linux/wait.h>
  17#include <linux/hash.h>
  18#include <linux/swap.h>
  19#include <linux/security.h>
  20#include <linux/pagemap.h>
  21#include <linux/cdev.h>
  22#include <linux/bootmem.h>
  23#include <linux/inotify.h>
  24#include <linux/mount.h>
  25
  26/*
  27 * This is needed for the following functions:
  28 *  - inode_has_buffers
  29 *  - invalidate_inode_buffers
  30 *  - invalidate_bdev
  31 *
  32 * FIXME: remove all knowledge of the buffer layer from this file
  33 */
  34#include <linux/buffer_head.h>
  35
  36/*
  37 * New inode.c implementation.
  38 *
  39 * This implementation has the basic premise of trying
  40 * to be extremely low-overhead and SMP-safe, yet be
  41 * simple enough to be "obviously correct".
  42 *
  43 * Famous last words.
  44 */
  45
  46/* inode dynamic allocation 1999, Andrea Arcangeli <andrea@suse.de> */
  47
  48/* #define INODE_PARANOIA 1 */
  49/* #define INODE_DEBUG 1 */
  50
  51/*
  52 * Inode lookup is no longer as critical as it used to be:
  53 * most of the lookups are going to be through the dcache.
  54 */
  55#define I_HASHBITS        i_hash_shift
  56#define I_HASHMASK        i_hash_mask
  57
  58static unsigned int i_hash_mask __read_mostly;
  59static unsigned int i_hash_shift __read_mostly;
  60
  61/*
  62 * Each inode can be on two separate lists. One is
  63 * the hash list of the inode, used for lookups. The
  64 * other linked list is the "type" list:
  65 *  "in_use" - valid inode, i_count > 0, i_nlink > 0
  66 *  "dirty"  - as "in_use" but also dirty
  67 *  "unused" - valid inode, i_count = 0
  68 *
  69 * A "dirty" list is maintained for each super block,
  70 * allowing for low-overhead inode sync() operations.
  71 */
  72
  73LIST_HEAD(inode_in_use);
  74LIST_HEAD(inode_unused);
  75static struct hlist_head *inode_hashtable __read_mostly;
  76
  77/*
  78 * A simple spinlock to protect the list manipulations.
  79 *
  80 * NOTE! You also have to own the lock if you change
  81 * the i_state of an inode while it is in use..
  82 */
  83DEFINE_SPINLOCK(inode_lock);
  84
  85/*
  86 * iprune_mutex provides exclusion between the kswapd or try_to_free_pages
  87 * icache shrinking path, and the umount path.  Without this exclusion,
  88 * by the time prune_icache calls iput for the inode whose pages it has
  89 * been invalidating, or by the time it calls clear_inode & destroy_inode
  90 * from its final dispose_list, the struct super_block they refer to
  91 * (for inode->i_sb->s_op) may already have been freed and reused.
  92 */
  93static DEFINE_MUTEX(iprune_mutex);
  94
  95/*
  96 * Statistics gathering..
  97 */
  98struct inodes_stat_t inodes_stat;
  99
 100static struct kmem_cache * inode_cachep __read_mostly;
 101
 102static void wake_up_inode(struct inode *inode)
 103{
 104        /*
 105         * Prevent speculative execution through spin_unlock(&inode_lock);
 106         */
 107        smp_mb();
 108        wake_up_bit(&inode->i_state, __I_LOCK);
 109}
 110
 111static struct inode *alloc_inode(struct super_block *sb)
 112{
 113        static const struct address_space_operations empty_aops;
 114        static struct inode_operations empty_iops;
 115        static const struct file_operations empty_fops;
 116        struct inode *inode;
 117
 118        if (sb->s_op->alloc_inode)
 119                inode = sb->s_op->alloc_inode(sb);
 120        else
 121                inode = (struct inode *) kmem_cache_alloc(inode_cachep, GFP_KERNEL);
 122
 123        if (inode) {
 124                struct address_space * const mapping = &inode->i_data;
 125
 126                inode->i_sb = sb;
 127                inode->i_blkbits = sb->s_blocksize_bits;
 128                inode->i_flags = 0;
 129                atomic_set(&inode->i_count, 1);
 130                inode->i_op = &empty_iops;
 131                inode->i_fop = &empty_fops;
 132                inode->i_nlink = 1;
 133                atomic_set(&inode->i_writecount, 0);
 134                inode->i_size = 0;
 135                inode->i_blocks = 0;
 136                inode->i_bytes = 0;
 137                inode->i_generation = 0;
 138#ifdef CONFIG_QUOTA
 139                memset(&inode->i_dquot, 0, sizeof(inode->i_dquot));
 140#endif
 141                inode->i_pipe = NULL;
 142                inode->i_bdev = NULL;
 143                inode->i_cdev = NULL;
 144                inode->i_rdev = 0;
 145                inode->dirtied_when = 0;
 146                if (security_inode_alloc(inode)) {
 147                        if (inode->i_sb->s_op->destroy_inode)
 148                                inode->i_sb->s_op->destroy_inode(inode);
 149                        else
 150                                kmem_cache_free(inode_cachep, (inode));
 151                        return NULL;
 152                }
 153
 154                spin_lock_init(&inode->i_lock);
 155                lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);
 156
 157                mutex_init(&inode->i_mutex);
 158                lockdep_set_class(&inode->i_mutex, &sb->s_type->i_mutex_key);
 159
 160                init_rwsem(&inode->i_alloc_sem);
 161                lockdep_set_class(&inode->i_alloc_sem, &sb->s_type->i_alloc_sem_key);
 162
 163                mapping->a_ops = &empty_aops;
 164                 mapping->host = inode;
 165                mapping->flags = 0;
 166                mapping_set_gfp_mask(mapping, GFP_HIGHUSER_PAGECACHE);
 167                mapping->assoc_mapping = NULL;
 168                mapping->backing_dev_info = &default_backing_dev_info;
 169                mapping->writeback_index = 0;
 170
 171                /*
 172                 * If the block_device provides a backing_dev_info for client
  173                 * inodes then use that.  Otherwise the inode shares the bdev's
 174                 * backing_dev_info.
 175                 */
 176                if (sb->s_bdev) {
 177                        struct backing_dev_info *bdi;
 178
 179                        bdi = sb->s_bdev->bd_inode_backing_dev_info;
 180                        if (!bdi)
 181                                bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info;
 182                        mapping->backing_dev_info = bdi;
 183                }
 184                inode->i_private = NULL;
 185                inode->i_mapping = mapping;
 186        }
 187        return inode;
 188}
 189
 190void destroy_inode(struct inode *inode) 
 191{
 192        BUG_ON(inode_has_buffers(inode));
 193        security_inode_free(inode);
 194        if (inode->i_sb->s_op->destroy_inode)
 195                inode->i_sb->s_op->destroy_inode(inode);
 196        else
 197                kmem_cache_free(inode_cachep, (inode));
 198}
 199
 200
 201/*
 202 * These are initializations that only need to be done
 203 * once, because the fields are idempotent across use
  204 * of the inode, so let the slab be aware of that.
 205 */
 206void inode_init_once(struct inode *inode)
 207{
 208        memset(inode, 0, sizeof(*inode));
 209        INIT_HLIST_NODE(&inode->i_hash);
 210        INIT_LIST_HEAD(&inode->i_dentry);
 211        INIT_LIST_HEAD(&inode->i_devices);
 212        INIT_RADIX_TREE(&inode->i_data.page_tree, GFP_ATOMIC);
 213        spin_lock_init(&inode->i_data.tree_lock);
 214        spin_lock_init(&inode->i_data.i_mmap_lock);
 215        INIT_LIST_HEAD(&inode->i_data.private_list);
 216        spin_lock_init(&inode->i_data.private_lock);
 217        INIT_RAW_PRIO_TREE_ROOT(&inode->i_data.i_mmap);
 218        INIT_LIST_HEAD(&inode->i_data.i_mmap_nonlinear);
 219        i_size_ordered_init(inode);
 220#ifdef CONFIG_INOTIFY
 221        INIT_LIST_HEAD(&inode->inotify_watches);
 222        mutex_init(&inode->inotify_mutex);
 223#endif
 224}
 225
 226EXPORT_SYMBOL(inode_init_once);
 227
 228static void init_once(void *foo)
 229{
 230        struct inode * inode = (struct inode *) foo;
 231
 232        inode_init_once(inode);
 233}
 234
 235/*
 236 * inode_lock must be held
 237 */
 238void __iget(struct inode * inode)
 239{
 240        if (atomic_read(&inode->i_count)) {
 241                atomic_inc(&inode->i_count);
 242                return;
 243        }
 244        atomic_inc(&inode->i_count);
 245        if (!(inode->i_state & (I_DIRTY|I_SYNC)))
 246                list_move(&inode->i_list, &inode_in_use);
 247        inodes_stat.nr_unused--;
 248}
 249
 250/**
 251 * clear_inode - clear an inode
 252 * @inode: inode to clear
 253 *
 254 * This is called by the filesystem to tell us
 255 * that the inode is no longer useful. We just
 256 * terminate it with extreme prejudice.
 257 */
 258void clear_inode(struct inode *inode)
 259{
 260        might_sleep();
 261        invalidate_inode_buffers(inode);
 262       
 263        BUG_ON(inode->i_data.nrpages);
 264        BUG_ON(!(inode->i_state & I_FREEING));
 265        BUG_ON(inode->i_state & I_CLEAR);
 266        inode_sync_wait(inode);
 267        DQUOT_DROP(inode);
 268        if (inode->i_sb->s_op->clear_inode)
 269                inode->i_sb->s_op->clear_inode(inode);
 270        if (S_ISBLK(inode->i_mode) && inode->i_bdev)
 271                bd_forget(inode);
 272        if (S_ISCHR(inode->i_mode) && inode->i_cdev)
 273                cd_forget(inode);
 274        inode->i_state = I_CLEAR;
 275}
 276
 277EXPORT_SYMBOL(clear_inode);
 278
 279/*
 280 * dispose_list - dispose of the contents of a local list
 281 * @head: the head of the list to free
 282 *
 283 * Dispose-list gets a local list with local inodes in it, so it doesn't
 284 * need to worry about list corruption and SMP locks.
 285 */
 286static void dispose_list(struct list_head *head)
 287{
 288        int nr_disposed = 0;
 289
 290        while (!list_empty(head)) {
 291                struct inode *inode;
 292
 293                inode = list_first_entry(head, struct inode, i_list);
 294                list_del(&inode->i_list);
 295
 296                if (inode->i_data.nrpages)
 297                        truncate_inode_pages(&inode->i_data, 0);
 298                clear_inode(inode);
 299
 300                spin_lock(&inode_lock);
 301                hlist_del_init(&inode->i_hash);
 302                list_del_init(&inode->i_sb_list);
 303                spin_unlock(&inode_lock);
 304
 305                wake_up_inode(inode);
 306                destroy_inode(inode);
 307                nr_disposed++;
 308        }
 309        spin_lock(&inode_lock);
 310        inodes_stat.nr_inodes -= nr_disposed;
 311        spin_unlock(&inode_lock);
 312}
 313
 314/*
 315 * Invalidate all inodes for a device.
 316 */
 317static int invalidate_list(struct list_head *head, struct list_head *dispose)
 318{
 319        struct list_head *next;
 320        int busy = 0, count = 0;
 321
 322        next = head->next;
 323        for (;;) {
 324                struct list_head * tmp = next;
 325                struct inode * inode;
 326
 327                /*
 328                 * We can reschedule here without worrying about the list's
 329                 * consistency because the per-sb list of inodes must not
 330                 * change during umount anymore, and because iprune_mutex keeps
 331                 * shrink_icache_memory() away.
 332                 */
 333                cond_resched_lock(&inode_lock);
 334
 335                next = next->next;
 336                if (tmp == head)
 337                        break;
 338                inode = list_entry(tmp, struct inode, i_sb_list);
 339                invalidate_inode_buffers(inode);
 340                if (!atomic_read(&inode->i_count)) {
 341                        list_move(&inode->i_list, dispose);
 342                        inode->i_state |= I_FREEING;
 343                        count++;
 344                        continue;
 345                }
 346                busy = 1;
 347        }
 348        /* only unused inodes may be cached with i_count zero */
 349        inodes_stat.nr_unused -= count;
 350        return busy;
 351}
 352
 353/**
 354 *        invalidate_inodes        - discard the inodes on a device
 355 *        @sb: superblock
 356 *
 357 *        Discard all of the inodes for a given superblock. If the discard
 358 *        fails because there are busy inodes then a non zero value is returned.
 359 *        If the discard is successful all the inodes have been discarded.
 360 */
 361int invalidate_inodes(struct super_block * sb)
 362{
 363        int busy;
 364        LIST_HEAD(throw_away);
 365
 366        mutex_lock(&iprune_mutex);
 367        spin_lock(&inode_lock);
 368        inotify_unmount_inodes(&sb->s_inodes);
 369        busy = invalidate_list(&sb->s_inodes, &throw_away);
 370        spin_unlock(&inode_lock);
 371
 372        dispose_list(&throw_away);
 373        mutex_unlock(&iprune_mutex);
 374
 375        return busy;
 376}
 377
 378EXPORT_SYMBOL(invalidate_inodes);
 379
 380static int can_unuse(struct inode *inode)
 381{
 382        if (inode->i_state)
 383                return 0;
 384        if (inode_has_buffers(inode))
 385                return 0;
 386        if (atomic_read(&inode->i_count))
 387                return 0;
 388        if (inode->i_data.nrpages)
 389                return 0;
 390        return 1;
 391}
 392
 393/*
 394 * Scan `goal' inodes on the unused list for freeable ones. They are moved to
 395 * a temporary list and then are freed outside inode_lock by dispose_list().
 396 *
 397 * Any inodes which are pinned purely because of attached pagecache have their
 398 * pagecache removed.  We expect the final iput() on that inode to add it to
 399 * the front of the inode_unused list.  So look for it there and if the
 400 * inode is still freeable, proceed.  The right inode is found 99.9% of the
 401 * time in testing on a 4-way.
 402 *
 403 * If the inode has metadata buffers attached to mapping->private_list then
 404 * try to remove them.
 405 */
 406static void prune_icache(int nr_to_scan)
 407{
 408        LIST_HEAD(freeable);
 409        int nr_pruned = 0;
 410        int nr_scanned;
 411        unsigned long reap = 0;
 412
 413        mutex_lock(&iprune_mutex);
 414        spin_lock(&inode_lock);
 415        for (nr_scanned = 0; nr_scanned < nr_to_scan; nr_scanned++) {
 416                struct inode *inode;
 417
 418                if (list_empty(&inode_unused))
 419                        break;
 420
 421                inode = list_entry(inode_unused.prev, struct inode, i_list);
 422
 423                if (inode->i_state || atomic_read(&inode->i_count)) {
 424                        list_move(&inode->i_list, &inode_unused);
 425                        continue;
 426                }
 427                if (inode_has_buffers(inode) || inode->i_data.nrpages) {
 428                        __iget(inode);
 429                        spin_unlock(&inode_lock);
 430                        if (remove_inode_buffers(inode))
 431                                reap += invalidate_mapping_pages(&inode->i_data,
 432                                                                0, -1);
 433                        iput(inode);
 434                        spin_lock(&inode_lock);
 435
 436                        if (inode != list_entry(inode_unused.next,
 437                                                struct inode, i_list))
 438                                continue;        /* wrong inode or list_empty */
 439                        if (!can_unuse(inode))
 440                                continue;
 441                }
 442                list_move(&inode->i_list, &freeable);
 443                inode->i_state |= I_FREEING;
 444                nr_pruned++;
 445        }
 446        inodes_stat.nr_unused -= nr_pruned;
 447        if (current_is_kswapd())
 448                __count_vm_events(KSWAPD_INODESTEAL, reap);
 449        else
 450                __count_vm_events(PGINODESTEAL, reap);
 451        spin_unlock(&inode_lock);
 452
 453        dispose_list(&freeable);
 454        mutex_unlock(&iprune_mutex);
 455}
 456
 457/*
 458 * shrink_icache_memory() will attempt to reclaim some unused inodes.  Here,
 459 * "unused" means that no dentries are referring to the inodes: the files are
 460 * not open and the dcache references to those inodes have already been
 461 * reclaimed.
 462 *
 463 * This function is passed the number of inodes to scan, and it returns the
 464 * total number of remaining possibly-reclaimable inodes.
 465 */
 466static int shrink_icache_memory(int nr, gfp_t gfp_mask)
 467{
 468        if (nr) {
 469                /*
 470                 * Nasty deadlock avoidance.  We may hold various FS locks,
 471                 * and we don't want to recurse into the FS that called us
 472                 * in clear_inode() and friends..
 473                  */
 474                if (!(gfp_mask & __GFP_FS))
 475                        return -1;
 476                prune_icache(nr);
 477        }
 478        return (inodes_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
 479}
 480
 481static struct shrinker icache_shrinker = {
 482        .shrink = shrink_icache_memory,
 483        .seeks = DEFAULT_SEEKS,
 484};
 485
 486static void __wait_on_freeing_inode(struct inode *inode);
 487/*
 488 * Called with the inode lock held.
 489 * NOTE: we are not increasing the inode-refcount, you must call __iget()
 490 * by hand after calling find_inode now! This simplifies iunique and won't
 491 * add any additional branch in the common code.
 492 */
 493static struct inode * find_inode(struct super_block * sb, struct hlist_head *head, int (*test)(struct inode *, void *), void *data)
 494{
 495        struct hlist_node *node;
 496        struct inode * inode = NULL;
 497
 498repeat:
 499        hlist_for_each_entry(inode, node, head, i_hash) {
 500                if (inode->i_sb != sb)
 501                        continue;
 502                if (!test(inode, data))
 503                        continue;
 504                if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)) {
 505                        __wait_on_freeing_inode(inode);
 506                        goto repeat;
 507                }
 508                break;
 509        }
 510        return node ? inode : NULL;
 511}
 512
 513/*
 514 * find_inode_fast is the fast path version of find_inode, see the comment at
 515 * iget_locked for details.
 516 */
 517static struct inode * find_inode_fast(struct super_block * sb, struct hlist_head *head, unsigned long ino)
 518{
 519        struct hlist_node *node;
 520        struct inode * inode = NULL;
 521
 522repeat:
 523        hlist_for_each_entry(inode, node, head, i_hash) {
 524                if (inode->i_ino != ino)
 525                        continue;
 526                if (inode->i_sb != sb)
 527                        continue;
 528                if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)) {
 529                        __wait_on_freeing_inode(inode);
 530                        goto repeat;
 531                }
 532                break;
 533        }
 534        return node ? inode : NULL;
 535}
 536
 537/**
 538 *        new_inode         - obtain an inode
 539 *        @sb: superblock
 540 *
 541 *        Allocates a new inode for given superblock. The default gfp_mask
 542 *        for allocations related to inode->i_mapping is GFP_HIGHUSER_PAGECACHE.
 543 *        If HIGHMEM pages are unsuitable or it is known that pages allocated
 544 *        for the page cache are not reclaimable or migratable,
 545 *        mapping_set_gfp_mask() must be called with suitable flags on the
 546 *        newly created inode's mapping
 547 *
 548 */
 549struct inode *new_inode(struct super_block *sb)
 550{
 551        /*
 552         * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
 553         * error if st_ino won't fit in target struct field. Use 32bit counter
 554         * here to attempt to avoid that.
 555         */
 556        static unsigned int last_ino;
 557        struct inode * inode;
 558
 559        spin_lock_prefetch(&inode_lock);
 560        
 561        inode = alloc_inode(sb);
 562        if (inode) {
 563                spin_lock(&inode_lock);
 564                inodes_stat.nr_inodes++;
 565                list_add(&inode->i_list, &inode_in_use);
 566                list_add(&inode->i_sb_list, &sb->s_inodes);
 567                inode->i_ino = ++last_ino;
 568                inode->i_state = 0;
 569                spin_unlock(&inode_lock);
 570        }
 571        return inode;
 572}
 573
 574EXPORT_SYMBOL(new_inode);
 575
 576void unlock_new_inode(struct inode *inode)
 577{
 578#ifdef CONFIG_DEBUG_LOCK_ALLOC
 579        if (inode->i_mode & S_IFDIR) {
 580                struct file_system_type *type = inode->i_sb->s_type;
 581
 582                /*
 583                 * ensure nobody is actually holding i_mutex
 584                 */
 585                mutex_destroy(&inode->i_mutex);
 586                mutex_init(&inode->i_mutex);
 587                lockdep_set_class(&inode->i_mutex, &type->i_mutex_dir_key);
 588        }
 589#endif
 590        /*
 591         * This is special!  We do not need the spinlock
 592         * when clearing I_LOCK, because we're guaranteed
 593         * that nobody else tries to do anything about the
 594         * state of the inode when it is locked, as we
 595         * just created it (so there can be no old holders
 596         * that haven't tested I_LOCK).
 597         */
 598        inode->i_state &= ~(I_LOCK|I_NEW);
 599        wake_up_inode(inode);
 600}
 601
 602EXPORT_SYMBOL(unlock_new_inode);
 603
 604/*
 605 * This is called without the inode lock held.. Be careful.
 606 *
 607 * We no longer cache the sb_flags in i_flags - see fs.h
 608 *        -- rmk@arm.uk.linux.org
 609 */
 610static struct inode * get_new_inode(struct super_block *sb, struct hlist_head *head, int (*test)(struct inode *, void *), int (*set)(struct inode *, void *), void *data)
 611{
 612        struct inode * inode;
 613
 614        inode = alloc_inode(sb);
 615        if (inode) {
 616                struct inode * old;
 617
 618                spin_lock(&inode_lock);
 619                /* We released the lock, so.. */
 620                old = find_inode(sb, head, test, data);
 621                if (!old) {
 622                        if (set(inode, data))
 623                                goto set_failed;
 624
 625                        inodes_stat.nr_inodes++;
 626                        list_add(&inode->i_list, &inode_in_use);
 627                        list_add(&inode->i_sb_list, &sb->s_inodes);
 628                        hlist_add_head(&inode->i_hash, head);
 629                        inode->i_state = I_LOCK|I_NEW;
 630                        spin_unlock(&inode_lock);
 631
 632                        /* Return the locked inode with I_NEW set, the
 633                         * caller is responsible for filling in the contents
 634                         */
 635                        return inode;
 636                }
 637
 638                /*
 639                 * Uhhuh, somebody else created the same inode under
 640                 * us. Use the old inode instead of the one we just
 641                 * allocated.
 642                 */
 643                __iget(old);
 644                spin_unlock(&inode_lock);
 645                destroy_inode(inode);
 646                inode = old;
 647                wait_on_inode(inode);
 648        }
 649        return inode;
 650
 651set_failed:
 652        spin_unlock(&inode_lock);
 653        destroy_inode(inode);
 654        return NULL;
 655}
 656
 657/*
 658 * get_new_inode_fast is the fast path version of get_new_inode, see the
 659 * comment at iget_locked for details.
 660 */
 661static struct inode * get_new_inode_fast(struct super_block *sb, struct hlist_head *head, unsigned long ino)
 662{
 663        struct inode * inode;
 664
 665        inode = alloc_inode(sb);
 666        if (inode) {
 667                struct inode * old;
 668
 669                spin_lock(&inode_lock);
 670                /* We released the lock, so.. */
 671                old = find_inode_fast(sb, head, ino);
 672                if (!old) {
 673                        inode->i_ino = ino;
 674                        inodes_stat.nr_inodes++;
 675                        list_add(&inode->i_list, &inode_in_use);
 676                        list_add(&inode->i_sb_list, &sb->s_inodes);
 677                        hlist_add_head(&inode->i_hash, head);
 678                        inode->i_state = I_LOCK|I_NEW;
 679                        spin_unlock(&inode_lock);
 680
 681                        /* Return the locked inode with I_NEW set, the
 682                         * caller is responsible for filling in the contents
 683                         */
 684                        return inode;
 685                }
 686
 687                /*
 688                 * Uhhuh, somebody else created the same inode under
 689                 * us. Use the old inode instead of the one we just
 690                 * allocated.
 691                 */
 692                __iget(old);
 693                spin_unlock(&inode_lock);
 694                destroy_inode(inode);
 695                inode = old;
 696                wait_on_inode(inode);
 697        }
 698        return inode;
 699}
 700
 701static unsigned long hash(struct super_block *sb, unsigned long hashval)
 702{
 703        unsigned long tmp;
 704
 705        tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
 706                        L1_CACHE_BYTES;
 707        tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> I_HASHBITS);
 708        return tmp & I_HASHMASK;
 709}
 710
 711/**
 712 *        iunique - get a unique inode number
 713 *        @sb: superblock
 714 *        @max_reserved: highest reserved inode number
 715 *
 716 *        Obtain an inode number that is unique on the system for a given
 717 *        superblock. This is used by file systems that have no natural
 718 *        permanent inode numbering system. An inode number is returned that
 719 *        is higher than the reserved limit but unique.
 720 *
 721 *        BUGS:
 722 *        With a large number of inodes live on the file system this function
 723 *        currently becomes quite slow.
 724 */
 725ino_t iunique(struct super_block *sb, ino_t max_reserved)
 726{
 727        /*
 728         * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
 729         * error if st_ino won't fit in target struct field. Use 32bit counter
 730         * here to attempt to avoid that.
 731         */
 732        static unsigned int counter;
 733        struct inode *inode;
 734        struct hlist_head *head;
 735        ino_t res;
 736
 737        spin_lock(&inode_lock);
 738        do {
 739                if (counter <= max_reserved)
 740                        counter = max_reserved + 1;
 741                res = counter++;
 742                head = inode_hashtable + hash(sb, res);
 743                inode = find_inode_fast(sb, head, res);
 744        } while (inode != NULL);
 745        spin_unlock(&inode_lock);
 746
 747        return res;
 748}
 749EXPORT_SYMBOL(iunique);
 750
 751struct inode *igrab(struct inode *inode)
 752{
 753        spin_lock(&inode_lock);
 754        if (!(inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)))
 755                __iget(inode);
 756        else
 757                /*
  758                 * Handle the case where s_op->clear_inode has not been
 759                 * called yet, and somebody is calling igrab
 760                 * while the inode is getting freed.
 761                 */
 762                inode = NULL;
 763        spin_unlock(&inode_lock);
 764        return inode;
 765}
 766
 767EXPORT_SYMBOL(igrab);
 768
 769/**
 770 * ifind - internal function, you want ilookup5() or iget5().
 771 * @sb:                super block of file system to search
 772 * @head:       the head of the list to search
 773 * @test:        callback used for comparisons between inodes
 774 * @data:        opaque data pointer to pass to @test
 775 * @wait:        if true wait for the inode to be unlocked, if false do not
 776 *
 777 * ifind() searches for the inode specified by @data in the inode
 778 * cache. This is a generalized version of ifind_fast() for file systems where
 779 * the inode number is not sufficient for unique identification of an inode.
 780 *
 781 * If the inode is in the cache, the inode is returned with an incremented
 782 * reference count.
 783 *
 784 * Otherwise NULL is returned.
 785 *
 786 * Note, @test is called with the inode_lock held, so can't sleep.
 787 */
 788static struct inode *ifind(struct super_block *sb,
 789                struct hlist_head *head, int (*test)(struct inode *, void *),
 790                void *data, const int wait)
 791{
 792        struct inode *inode;
 793
 794        spin_lock(&inode_lock);
 795        inode = find_inode(sb, head, test, data);
 796        if (inode) {
 797                __iget(inode);
 798                spin_unlock(&inode_lock);
 799                if (likely(wait))
 800                        wait_on_inode(inode);
 801                return inode;
 802        }
 803        spin_unlock(&inode_lock);
 804        return NULL;
 805}
 806
 807/**
 808 * ifind_fast - internal function, you want ilookup() or iget().
 809 * @sb:                super block of file system to search
 810 * @head:       head of the list to search
 811 * @ino:        inode number to search for
 812 *
 813 * ifind_fast() searches for the inode @ino in the inode cache. This is for
 814 * file systems where the inode number is sufficient for unique identification
 815 * of an inode.
 816 *
 817 * If the inode is in the cache, the inode is returned with an incremented
 818 * reference count.
 819 *
 820 * Otherwise NULL is returned.
 821 */
 822static struct inode *ifind_fast(struct super_block *sb,
 823                struct hlist_head *head, unsigned long ino)
 824{
 825        struct inode *inode;
 826
 827        spin_lock(&inode_lock);
 828        inode = find_inode_fast(sb, head, ino);
 829        if (inode) {
 830                __iget(inode);
 831                spin_unlock(&inode_lock);
 832                wait_on_inode(inode);
 833                return inode;
 834        }
 835        spin_unlock(&inode_lock);
 836        return NULL;
 837}
 838
 839/**
 840 * ilookup5_nowait - search for an inode in the inode cache
 841 * @sb:                super block of file system to search
 842 * @hashval:        hash value (usually inode number) to search for
 843 * @test:        callback used for comparisons between inodes
 844 * @data:        opaque data pointer to pass to @test
 845 *
 846 * ilookup5() uses ifind() to search for the inode specified by @hashval and
 847 * @data in the inode cache. This is a generalized version of ilookup() for
 848 * file systems where the inode number is not sufficient for unique
 849 * identification of an inode.
 850 *
 851 * If the inode is in the cache, the inode is returned with an incremented
 852 * reference count.  Note, the inode lock is not waited upon so you have to be
 853 * very careful what you do with the returned inode.  You probably should be
 854 * using ilookup5() instead.
 855 *
 856 * Otherwise NULL is returned.
 857 *
 858 * Note, @test is called with the inode_lock held, so can't sleep.
 859 */
 860struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
 861                int (*test)(struct inode *, void *), void *data)
 862{
 863        struct hlist_head *head = inode_hashtable + hash(sb, hashval);
 864
 865        return ifind(sb, head, test, data, 0);
 866}
 867
 868EXPORT_SYMBOL(ilookup5_nowait);
 869
 870/**
 871 * ilookup5 - search for an inode in the inode cache
 872 * @sb:                super block of file system to search
 873 * @hashval:        hash value (usually inode number) to search for
 874 * @test:        callback used for comparisons between inodes
 875 * @data:        opaque data pointer to pass to @test
 876 *
 877 * ilookup5() uses ifind() to search for the inode specified by @hashval and
 878 * @data in the inode cache. This is a generalized version of ilookup() for
 879 * file systems where the inode number is not sufficient for unique
 880 * identification of an inode.
 881 *
 882 * If the inode is in the cache, the inode lock is waited upon and the inode is
 883 * returned with an incremented reference count.
 884 *
 885 * Otherwise NULL is returned.
 886 *
 887 * Note, @test is called with the inode_lock held, so can't sleep.
 888 */
 889struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
 890                int (*test)(struct inode *, void *), void *data)
 891{
 892        struct hlist_head *head = inode_hashtable + hash(sb, hashval);
 893
 894        return ifind(sb, head, test, data, 1);
 895}
 896
 897EXPORT_SYMBOL(ilookup5);
 898
 899/**
 900 * ilookup - search for an inode in the inode cache
 901 * @sb:                super block of file system to search
 902 * @ino:        inode number to search for
 903 *
 904 * ilookup() uses ifind_fast() to search for the inode @ino in the inode cache.
 905 * This is for file systems where the inode number is sufficient for unique
 906 * identification of an inode.
 907 *
 908 * If the inode is in the cache, the inode is returned with an incremented
 909 * reference count.
 910 *
 911 * Otherwise NULL is returned.
 912 */
 913struct inode *ilookup(struct super_block *sb, unsigned long ino)
 914{
 915        struct hlist_head *head = inode_hashtable + hash(sb, ino);
 916
 917        return ifind_fast(sb, head, ino);
 918}
 919
 920EXPORT_SYMBOL(ilookup);
 921
 922/**
 923 * iget5_locked - obtain an inode from a mounted file system
 924 * @sb:                super block of file system
 925 * @hashval:        hash value (usually inode number) to get
 926 * @test:        callback used for comparisons between inodes
 927 * @set:        callback used to initialize a new struct inode
 928 * @data:        opaque data pointer to pass to @test and @set
 929 *
 930 * iget5_locked() uses ifind() to search for the inode specified by @hashval
 931 * and @data in the inode cache and if present it is returned with an increased
 932 * reference count. This is a generalized version of iget_locked() for file
 933 * systems where the inode number is not sufficient for unique identification
 934 * of an inode.
 935 *
 936 * If the inode is not in cache, get_new_inode() is called to allocate a new
 937 * inode and this is returned locked, hashed, and with the I_NEW flag set. The
 938 * file system gets to fill it in before unlocking it via unlock_new_inode().
 939 *
 940 * Note both @test and @set are called with the inode_lock held, so can't sleep.
 941 */
 942struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
 943                int (*test)(struct inode *, void *),
 944                int (*set)(struct inode *, void *), void *data)
 945{
 946        struct hlist_head *head = inode_hashtable + hash(sb, hashval);
 947        struct inode *inode;
 948
 949        inode = ifind(sb, head, test, data, 1);
 950        if (inode)
 951                return inode;
 952        /*
 953         * get_new_inode() will do the right thing, re-trying the search
 954         * in case it had to block at any point.
 955         */
 956        return get_new_inode(sb, head, test, set, data);
 957}
 958
 959EXPORT_SYMBOL(iget5_locked);
 960
 961/**
 962 * iget_locked - obtain an inode from a mounted file system
 963 * @sb:                super block of file system
 964 * @ino:        inode number to get
 965 *
 966 * iget_locked() uses ifind_fast() to search for the inode specified by @ino in
 967 * the inode cache and if present it is returned with an increased reference
 968 * count. This is for file systems where the inode number is sufficient for
 969 * unique identification of an inode.
 970 *
 971 * If the inode is not in cache, get_new_inode_fast() is called to allocate a
 972 * new inode and this is returned locked, hashed, and with the I_NEW flag set.
 973 * The file system gets to fill it in before unlocking it via
 974 * unlock_new_inode().
 975 */
 976struct inode *iget_locked(struct super_block *sb, unsigned long ino)
 977{
 978        struct hlist_head *head = inode_hashtable + hash(sb, ino);
 979        struct inode *inode;
 980
 981        inode = ifind_fast(sb, head, ino);
 982        if (inode)
 983                return inode;
 984        /*
 985         * get_new_inode_fast() will do the right thing, re-trying the search
 986         * in case it had to block at any point.
 987         */
 988        return get_new_inode_fast(sb, head, ino);
 989}
 990
 991EXPORT_SYMBOL(iget_locked);
 992
 993/**
 994 *        __insert_inode_hash - hash an inode
 995 *        @inode: unhashed inode
 996 *        @hashval: unsigned long value used to locate this object in the
 997 *                inode_hashtable.
 998 *
 999 *        Add an inode to the inode hash for this superblock.
1000 */
1001void __insert_inode_hash(struct inode *inode, unsigned long hashval)
1002{
1003        struct hlist_head *head = inode_hashtable + hash(inode->i_sb, hashval);
1004        spin_lock(&inode_lock);
1005        hlist_add_head(&inode->i_hash, head);
1006        spin_unlock(&inode_lock);
1007}
1008
1009EXPORT_SYMBOL(__insert_inode_hash);
1010
1011/**
1012 *        remove_inode_hash - remove an inode from the hash
1013 *        @inode: inode to unhash
1014 *
1015 *        Remove an inode from the superblock.
1016 */
1017void remove_inode_hash(struct inode *inode)
1018{
1019        spin_lock(&inode_lock);
1020        hlist_del_init(&inode->i_hash);
1021        spin_unlock(&inode_lock);
1022}
1023
1024EXPORT_SYMBOL(remove_inode_hash);
1025
1026/*
1027 * Tell the filesystem that this inode is no longer of any interest and should
1028 * be completely destroyed.
1029 *
1030 * We leave the inode in the inode hash table until *after* the filesystem's
1031 * ->delete_inode completes.  This ensures that an iget (such as nfsd might
1032 * instigate) will always find up-to-date information either in the hash or on
1033 * disk.
1034 *
1035 * I_FREEING is set so that no-one will take a new reference to the inode while
1036 * it is being deleted.
1037 */
1038void generic_delete_inode(struct inode *inode)
1039{
1040        const struct super_operations *op = inode->i_sb->s_op;
1041
1042        list_del_init(&inode->i_list);
1043        list_del_init(&inode->i_sb_list);
1044        inode->i_state |= I_FREEING;
1045        inodes_stat.nr_inodes--;
1046        spin_unlock(&inode_lock);
1047
1048        security_inode_delete(inode);
1049
1050        if (op->delete_inode) {
1051                void (*delete)(struct inode *) = op->delete_inode;
1052                if (!is_bad_inode(inode))
1053                        DQUOT_INIT(inode);
1054                /* Filesystems implementing their own
1055                 * s_op->delete_inode are required to call
1056                 * truncate_inode_pages and clear_inode()
1057                 * internally */
1058                delete(inode);
1059        } else {
1060                truncate_inode_pages(&inode->i_data, 0);
1061                clear_inode(inode);
1062        }
1063        spin_lock(&inode_lock);
1064        hlist_del_init(&inode->i_hash);
1065        spin_unlock(&inode_lock);
1066        wake_up_inode(inode);
1067        BUG_ON(inode->i_state != I_CLEAR);
1068        destroy_inode(inode);
1069}
1070
1071EXPORT_SYMBOL(generic_delete_inode);
1072
1073static void generic_forget_inode(struct inode *inode)
1074{
1075        struct super_block *sb = inode->i_sb;
1076
1077        if (!hlist_unhashed(&inode->i_hash)) {
1078                if (!(inode->i_state & (I_DIRTY|I_SYNC)))
1079                        list_move(&inode->i_list, &inode_unused);
1080                inodes_stat.nr_unused++;
1081                if (sb->s_flags & MS_ACTIVE) {
1082                        spin_unlock(&inode_lock);
1083                        return;
1084                }
1085                inode->i_state |= I_WILL_FREE;
1086                spin_unlock(&inode_lock);
1087                write_inode_now(inode, 1);
1088                spin_lock(&inode_lock);
1089                inode->i_state &= ~I_WILL_FREE;
1090                inodes_stat.nr_unused--;
1091                hlist_del_init(&inode->i_hash);
1092        }
1093        list_del_init(&inode->i_list);
1094        list_del_init(&inode->i_sb_list);
1095        inode->i_state |= I_FREEING;
1096        inodes_stat.nr_inodes--;
1097        spin_unlock(&inode_lock);
1098        if (inode->i_data.nrpages)
1099                truncate_inode_pages(&inode->i_data, 0);
1100        clear_inode(inode);
1101        wake_up_inode(inode);
1102        destroy_inode(inode);
1103}
1104
1105/*
1106 * Normal UNIX filesystem behaviour: delete the
1107 * inode when the usage count drops to zero, and
1108 * i_nlink is zero.
1109 */
1110void generic_drop_inode(struct inode *inode)
1111{
1112        if (!inode->i_nlink)
1113                generic_delete_inode(inode);
1114        else
1115                generic_forget_inode(inode);
1116}
1117
1118EXPORT_SYMBOL_GPL(generic_drop_inode);
1119
1120/*
1121 * Called when we're dropping the last reference
1122 * to an inode. 
1123 *
1124 * Call the FS "drop()" function, defaulting to
1125 * the legacy UNIX filesystem behaviour..
1126 *
1127 * NOTE! NOTE! NOTE! We're called with the inode lock
1128 * held, and the drop function is supposed to release
1129 * the lock!
1130 */
1131static inline void iput_final(struct inode *inode)
1132{
1133        const struct super_operations *op = inode->i_sb->s_op;
1134        void (*drop)(struct inode *) = generic_drop_inode;
1135
1136        if (op && op->drop_inode)
1137                drop = op->drop_inode;
1138        drop(inode);
1139}
1140
1141/**
1142 *        iput        - put an inode 
1143 *        @inode: inode to put
1144 *
1145 *        Puts an inode, dropping its usage count. If the inode use count hits
1146 *        zero, the inode is then freed and may also be destroyed.
1147 *
1148 *        Consequently, iput() can sleep.
1149 */
1150void iput(struct inode *inode)
1151{
1152        if (inode) {
1153                BUG_ON(inode->i_state == I_CLEAR);
1154
1155                if (atomic_dec_and_lock(&inode->i_count, &inode_lock))
1156                        iput_final(inode);
1157        }
1158}
1159
1160EXPORT_SYMBOL(iput);
1161
1162/**
1163 *        bmap        - find a block number in a file
1164 *        @inode: inode of file
1165 *        @block: block to find
1166 *
1167 *        Returns the block number on the device holding the inode that
1168 *        is the disk block number for the block of the file requested.
1169 *        That is, asked for block 4 of inode 1 the function will return the
1170 *        disk block relative to the disk start that holds that block of the 
1171 *        file.
1172 */
1173sector_t bmap(struct inode * inode, sector_t block)
1174{
1175        sector_t res = 0;
1176        if (inode->i_mapping->a_ops->bmap)
1177                res = inode->i_mapping->a_ops->bmap(inode->i_mapping, block);
1178        return res;
1179}
1180EXPORT_SYMBOL(bmap);
1181
1182/**
1183 *        touch_atime        -        update the access time
1184 *        @mnt: mount the inode is accessed on
1185 *        @dentry: dentry accessed
1186 *
1187 *        Update the accessed time on an inode and mark it for writeback.
1188 *        This function automatically handles read only file systems and media,
1189 *        as well as the "noatime" flag and inode specific "noatime" markers.
1190 */
1191void touch_atime(struct vfsmount *mnt, struct dentry *dentry)
1192{
1193        struct inode *inode = dentry->d_inode;
1194        struct timespec now;
1195
1196        if (mnt_want_write(mnt))
1197                return;
1198        if (inode->i_flags & S_NOATIME)
1199                goto out;
1200        if (IS_NOATIME(inode))
1201                goto out;
1202        if ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))
1203                goto out;
1204
1205        if (mnt->mnt_flags & MNT_NOATIME)
1206                goto out;
1207        if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
1208                goto out;
1209        if (mnt->mnt_flags & MNT_RELATIME) {
1210                /*
1211                 * With relative atime, only update atime if the previous
1212                 * atime is earlier than either the ctime or mtime.
1213                 */
1214                if (timespec_compare(&inode->i_mtime, &inode->i_atime) < 0 &&
1215                    timespec_compare(&inode->i_ctime, &inode->i_atime) < 0)
1216                        goto out;
1217        }
1218
1219        now = current_fs_time(inode->i_sb);
1220        if (timespec_equal(&inode->i_atime, &now))
1221                goto out;
1222
1223        inode->i_atime = now;
1224        mark_inode_dirty_sync(inode);
1225out:
1226        mnt_drop_write(mnt);
1227}
1228EXPORT_SYMBOL(touch_atime);
1229
1230/**
1231 *        file_update_time        -        update mtime and ctime time
1232 *        @file: file accessed
1233 *
1234 *        Update the mtime and ctime members of an inode and mark the inode
1235 *        for writeback.  Note that this function is meant exclusively for
1236 *        usage in the file write path of filesystems, and filesystems may
1237 *        choose to explicitly ignore update via this function with the
1238 *        S_NOCTIME inode flag, e.g. for network filesystem where these
1239 *        timestamps are handled by the server.
1240 */
1241
1242void file_update_time(struct file *file)
1243{
1244        struct inode *inode = file->f_path.dentry->d_inode;
1245        struct timespec now;
1246        int sync_it = 0;
1247        int err;
1248
1249        if (IS_NOCMTIME(inode))
1250                return;
1251
1252        err = mnt_want_write(file->f_path.mnt);
1253        if (err)
1254                return;
1255
1256        now = current_fs_time(inode->i_sb);
1257        if (!timespec_equal(&inode->i_mtime, &now)) {
1258                inode->i_mtime = now;
1259                sync_it = 1;
1260        }
1261
1262        if (!timespec_equal(&inode->i_ctime, &now)) {
1263                inode->i_ctime = now;
1264                sync_it = 1;
1265        }
1266
1267        if (IS_I_VERSION(inode)) {
1268                inode_inc_iversion(inode);
1269                sync_it = 1;
1270        }
1271
1272        if (sync_it)
1273                mark_inode_dirty_sync(inode);
1274        mnt_drop_write(file->f_path.mnt);
1275}
1276
1277EXPORT_SYMBOL(file_update_time);
1278
1279int inode_needs_sync(struct inode *inode)
1280{
1281        if (IS_SYNC(inode))
1282                return 1;
1283        if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
1284                return 1;
1285        return 0;
1286}
1287
1288EXPORT_SYMBOL(inode_needs_sync);
1289
1290int inode_wait(void *word)
1291{
1292        schedule();
1293        return 0;
1294}
1295
1296/*
1297 * If we try to find an inode in the inode hash while it is being
1298 * deleted, we have to wait until the filesystem completes its
1299 * deletion before reporting that it isn't found.  This function waits
1300 * until the deletion _might_ have completed.  Callers are responsible
1301 * to recheck inode state.
1302 *
1303 * It doesn't matter if I_LOCK is not set initially, a call to
1304 * wake_up_inode() after removing from the hash list will DTRT.
1305 *
1306 * This is called with inode_lock held.
1307 */
1308static void __wait_on_freeing_inode(struct inode *inode)
1309{
1310        wait_queue_head_t *wq;
1311        DEFINE_WAIT_BIT(wait, &inode->i_state, __I_LOCK);
1312        wq = bit_waitqueue(&inode->i_state, __I_LOCK);
1313        prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
1314        spin_unlock(&inode_lock);
1315        schedule();
1316        finish_wait(wq, &wait.wait);
1317        spin_lock(&inode_lock);
1318}
1319
1320/*
1321 * We rarely want to lock two inodes that do not have a parent/child
1322 * relationship (such as directory, child inode) simultaneously. The
1323 * vast majority of file systems should be able to get along fine
1324 * without this. Do not use these functions except as a last resort.
1325 */
1326void inode_double_lock(struct inode *inode1, struct inode *inode2)
1327{
1328        if (inode1 == NULL || inode2 == NULL || inode1 == inode2) {
1329                if (inode1)
1330                        mutex_lock(&inode1->i_mutex);
1331                else if (inode2)
1332                        mutex_lock(&inode2->i_mutex);
1333                return;
1334        }
1335
1336        if (inode1 < inode2) {
1337                mutex_lock_nested(&inode1->i_mutex, I_MUTEX_PARENT);
1338                mutex_lock_nested(&inode2->i_mutex, I_MUTEX_CHILD);
1339        } else {
1340                mutex_lock_nested(&inode2->i_mutex, I_MUTEX_PARENT);
1341                mutex_lock_nested(&inode1->i_mutex, I_MUTEX_CHILD);
1342        }
1343}
1344EXPORT_SYMBOL(inode_double_lock);
1345
1346void inode_double_unlock(struct inode *inode1, struct inode *inode2)
1347{
1348        if (inode1)
1349                mutex_unlock(&inode1->i_mutex);
1350
1351        if (inode2 && inode2 != inode1)
1352                mutex_unlock(&inode2->i_mutex);
1353}
1354EXPORT_SYMBOL(inode_double_unlock);
1355
1356static __initdata unsigned long ihash_entries;
1357static int __init set_ihash_entries(char *str)
1358{
1359        if (!str)
1360                return 0;
1361        ihash_entries = simple_strtoul(str, &str, 0);
1362        return 1;
1363}
1364__setup("ihash_entries=", set_ihash_entries);
1365
1366/*
1367 * Initialize the waitqueues and inode hash table.
1368 */
1369void __init inode_init_early(void)
1370{
1371        int loop;
1372
1373        /* If hashes are distributed across NUMA nodes, defer
1374         * hash allocation until vmalloc space is available.
1375         */
1376        if (hashdist)
1377                return;
1378
1379        inode_hashtable =
1380                alloc_large_system_hash("Inode-cache",
1381                                        sizeof(struct hlist_head),
1382                                        ihash_entries,
1383                                        14,
1384                                        HASH_EARLY,
1385                                        &i_hash_shift,
1386                                        &i_hash_mask,
1387                                        0);
1388
1389        for (loop = 0; loop < (1 << i_hash_shift); loop++)
1390                INIT_HLIST_HEAD(&inode_hashtable[loop]);
1391}
1392
1393void __init inode_init(void)
1394{
1395        int loop;
1396
1397        /* inode slab cache */
1398        inode_cachep = kmem_cache_create("inode_cache",
1399                                         sizeof(struct inode),
1400                                         0,
1401                                         (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
1402                                         SLAB_MEM_SPREAD),
1403                                         init_once);
1404        register_shrinker(&icache_shrinker);
1405
1406        /* Hash may have been set up in inode_init_early */
1407        if (!hashdist)
1408                return;
1409
1410        inode_hashtable =
1411                alloc_large_system_hash("Inode-cache",
1412                                        sizeof(struct hlist_head),
1413                                        ihash_entries,
1414                                        14,
1415                                        0,
1416                                        &i_hash_shift,
1417                                        &i_hash_mask,
1418                                        0);
1419
1420        for (loop = 0; loop < (1 << i_hash_shift); loop++)
1421                INIT_HLIST_HEAD(&inode_hashtable[loop]);
1422}
1423
1424void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
1425{
1426        inode->i_mode = mode;
1427        if (S_ISCHR(mode)) {
1428                inode->i_fop = &def_chr_fops;
1429                inode->i_rdev = rdev;
1430        } else if (S_ISBLK(mode)) {
1431                inode->i_fop = &def_blk_fops;
1432                inode->i_rdev = rdev;
1433        } else if (S_ISFIFO(mode))
1434                inode->i_fop = &def_fifo_fops;
1435        else if (S_ISSOCK(mode))
1436                inode->i_fop = &bad_sock_fops;
1437        else
1438                printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o)\n",
1439                       mode);
1440}
1441EXPORT_SYMBOL(init_special_inode);