Showing error 909

User: Jiri Slaby
Error type: Double Lock
Error type description: A lock is acquired a second time in sequence without being released first
File location: fs/super.c
Line in file: 343
Project: Linux Kernel
Project version: 2.6.28
Tools: Undetermined 1
Clang Static Analyzer (3.0)
Entered: 2012-02-27 21:22:42 UTC


Source:

  1/*
  2 *  linux/fs/super.c
  3 *
  4 *  Copyright (C) 1991, 1992  Linus Torvalds
  5 *
  6 *  super.c contains code to handle: - mount structures
  7 *                                   - super-block tables
  8 *                                   - filesystem drivers list
  9 *                                   - mount system call
 10 *                                   - umount system call
 11 *                                   - ustat system call
 12 *
 13 * GK 2/5/95  -  Changed to support mounting the root fs via NFS
 14 *
 15 *  Added kerneld support: Jacques Gelinas and Bjorn Ekwall
 16 *  Added change_root: Werner Almesberger & Hans Lermen, Feb '96
 17 *  Added options to /proc/mounts:
 *    Torbjörn Lindh (torbjorn.lindh@gopta.se), April 14, 1996.
 19 *  Added devfs support: Richard Gooch <rgooch@atnf.csiro.au>, 13-JAN-1998
 20 *  Heavily rewritten for 'one fs - one tree' dcache architecture. AV, Mar 2000
 21 */
 22
 23#include <linux/module.h>
 24#include <linux/slab.h>
 25#include <linux/init.h>
 26#include <linux/smp_lock.h>
 27#include <linux/acct.h>
 28#include <linux/blkdev.h>
 29#include <linux/quotaops.h>
 30#include <linux/namei.h>
 31#include <linux/buffer_head.h>                /* for fsync_super() */
 32#include <linux/mount.h>
 33#include <linux/security.h>
 34#include <linux/syscalls.h>
 35#include <linux/vfs.h>
 36#include <linux/writeback.h>                /* for the emergency remount stuff */
 37#include <linux/idr.h>
 38#include <linux/kobject.h>
 39#include <linux/mutex.h>
 40#include <linux/file.h>
 41#include <asm/uaccess.h>
 42#include "internal.h"
 43
 44
/* All superblocks in the system; protected by sb_lock. */
LIST_HEAD(super_blocks);
/* Protects super_blocks, per-type fs_supers lists, and sb->s_count. */
DEFINE_SPINLOCK(sb_lock);
 47
/**
 *	alloc_super	-	create new superblock
 *	@type:	filesystem type superblock should belong to
 *
 *	Allocates and initializes a new &struct super_block.  alloc_super()
 *	returns a pointer to the new superblock or %NULL if allocation had
 *	failed.  On success the superblock is returned with s_umount held
 *	for writing and s_count set to S_BIAS / s_active set to 1.
 */
static struct super_block *alloc_super(struct file_system_type *type)
{
	struct super_block *s = kzalloc(sizeof(struct super_block),  GFP_USER);
	static struct super_operations default_op;

	if (s) {
		if (security_sb_alloc(s)) {
			/* LSM could not attach its blob; undo the allocation */
			kfree(s);
			s = NULL;
			goto out;
		}
		INIT_LIST_HEAD(&s->s_dirty);
		INIT_LIST_HEAD(&s->s_io);
		INIT_LIST_HEAD(&s->s_more_io);
		INIT_LIST_HEAD(&s->s_files);
		INIT_LIST_HEAD(&s->s_instances);
		INIT_HLIST_HEAD(&s->s_anon);
		INIT_LIST_HEAD(&s->s_inodes);
		INIT_LIST_HEAD(&s->s_dentry_lru);
		init_rwsem(&s->s_umount);
		mutex_init(&s->s_lock);
		/* per-fs-type lockdep key, so different fs types may nest
		 * s_umount differently without false positives */
		lockdep_set_class(&s->s_umount, &type->s_umount_key);
		/*
		 * The locking rules for s_lock are up to the
		 * filesystem. For example ext3fs has different
		 * lock ordering than usbfs:
		 */
		lockdep_set_class(&s->s_lock, &type->s_lock_key);
		/* taken before the sb becomes visible; released by the
		 * mount path once setup is complete */
		down_write(&s->s_umount);
		s->s_count = S_BIAS;
		atomic_set(&s->s_active, 1);
		mutex_init(&s->s_vfs_rename_mutex);
		mutex_init(&s->s_dquot.dqio_mutex);
		mutex_init(&s->s_dquot.dqonoff_mutex);
		init_rwsem(&s->s_dquot.dqptr_sem);
		init_waitqueue_head(&s->s_wait_unfrozen);
		s->s_maxbytes = MAX_NON_LFS;
		s->dq_op = sb_dquot_ops;
		s->s_qcop = sb_quotactl_ops;
		s->s_op = &default_op;
		s->s_time_gran = 1000000000;	/* 1 second granularity */
	}
out:
	return s;
}
100
101/**
102 *        destroy_super        -        frees a superblock
103 *        @s: superblock to free
104 *
105 *        Frees a superblock.
106 */
107static inline void destroy_super(struct super_block *s)
108{
109        security_sb_free(s);
110        kfree(s->s_subtype);
111        kfree(s->s_options);
112        kfree(s);
113}
114
115/* Superblock refcounting  */
116
117/*
118 * Drop a superblock's refcount.  Returns non-zero if the superblock was
119 * destroyed.  The caller must hold sb_lock.
120 */
121static int __put_super(struct super_block *sb)
122{
123        int ret = 0;
124
125        if (!--sb->s_count) {
126                destroy_super(sb);
127                ret = 1;
128        }
129        return ret;
130}
131
/*
 * Drop a superblock's refcount.
 * Returns non-zero if the superblock is about to be destroyed and
 * at least is already removed from super_blocks list, so if we are
 * making a loop through super blocks then we need to restart.
 * The caller must hold sb_lock.
 */
int __put_super_and_need_restart(struct super_block *sb)
{
	/* check for race with generic_shutdown_super() */
	if (list_empty(&sb->s_list)) {
		/* super block is removed, need to restart... */
		__put_super(sb);
		return 1;
	}
	/* can't be the last reference, since s_list is still in use */
	sb->s_count--;
	BUG_ON(sb->s_count == 0);
	return 0;
}
152
/**
 *	put_super	-	drop a temporary reference to superblock
 *	@sb: superblock in question
 *
 *	Drops a temporary reference, frees superblock if there's no
 *	references left.  Unlike __put_super(), takes sb_lock itself.
 */
static void put_super(struct super_block *sb)
{
	spin_lock(&sb_lock);
	__put_super(sb);
	spin_unlock(&sb_lock);
}
166
167
/**
 *	deactivate_super	-	drop an active reference to superblock
 *	@s: superblock to deactivate
 *
 *	Drops an active reference to superblock, acquiring a temporary one if
 *	there is no active references left.  In that case we lock superblock,
 *	tell fs driver to shut it down and drop the temporary reference we
 *	had just acquired.
 */
void deactivate_super(struct super_block *s)
{
	struct file_system_type *fs = s->s_type;
	/* only the last active reference enters the body; sb_lock held */
	if (atomic_dec_and_lock(&s->s_active, &sb_lock)) {
		/* turn the S_BIAS "active" bias into one plain reference */
		s->s_count -= S_BIAS-1;
		spin_unlock(&sb_lock);
		DQUOT_OFF(s, 0);
		down_write(&s->s_umount);	/* ->kill_sb() runs with it held */
		fs->kill_sb(s);
		put_filesystem(fs);
		put_super(s);	/* drop the temporary reference from above */
	}
}
190
191EXPORT_SYMBOL(deactivate_super);
192
/**
 *	grab_super - acquire an active reference
 *	@s: reference we are trying to make active
 *
 *	Tries to acquire an active reference.  grab_super() is used when we
 *	had just found a superblock in super_blocks or fs_type->fs_supers
 *	and want to turn it into a full-blown active reference.  grab_super()
 *	is called with sb_lock held and drops it.  Returns 1 in case of
 *	success, 0 if we had failed (superblock contents was already dead or
 *	dying when grab_super() had been called).
 */
static int grab_super(struct super_block *s) __releases(sb_lock)
{
	s->s_count++;		/* pin the sb while we sleep on s_umount */
	spin_unlock(&sb_lock);
	down_write(&s->s_umount);
	if (s->s_root) {
		spin_lock(&sb_lock);
		/* s_count above S_BIAS means it is still actively used */
		if (s->s_count > S_BIAS) {
			atomic_inc(&s->s_active);
			s->s_count--;	/* trade our pin for the active ref */
			spin_unlock(&sb_lock);
			return 1;
		}
		spin_unlock(&sb_lock);
	}
	/* sb is dead or dying: drop our pin and let shutdown proceed */
	up_write(&s->s_umount);
	put_super(s);
	yield();
	return 0;
}
224
/*
 * Superblock locking.  We really ought to get rid of these two.
 */
/* Take sb->s_lock.  get_fs_excl() additionally marks this task as
 * holding an fs-exclusive resource (paired with put_fs_excl() in
 * unlock_super()). */
void lock_super(struct super_block * sb)
{
	get_fs_excl();
	mutex_lock(&sb->s_lock);
}
233
/* Release sb->s_lock taken by lock_super(). */
void unlock_super(struct super_block * sb)
{
	put_fs_excl();
	mutex_unlock(&sb->s_lock);
}
239
240EXPORT_SYMBOL(lock_super);
241EXPORT_SYMBOL(unlock_super);
242
/*
 * Write out and wait upon all dirty data associated with this
 * superblock.  Filesystem data as well as the underlying block
 * device.  Takes the superblock lock.  Requires a second blkdev
 * flush by the caller to complete the operation.
 */
void __fsync_super(struct super_block *sb)
{
	sync_inodes_sb(sb, 0);		/* start inode writeback, don't wait */
	DQUOT_SYNC(sb);
	lock_super(sb);
	if (sb->s_dirt && sb->s_op->write_super)
		sb->s_op->write_super(sb);
	unlock_super(sb);
	if (sb->s_op->sync_fs)
		sb->s_op->sync_fs(sb, 1);	/* 1 == wait for completion */
	sync_blockdev(sb->s_bdev);
	sync_inodes_sb(sb, 1);		/* now wait on the inodes */
}
262
/*
 * Write out and wait upon all dirty data associated with this
 * superblock.  Filesystem data as well as the underlying block
 * device.  Takes the superblock lock.
 */
int fsync_super(struct super_block *sb)
{
	__fsync_super(sb);
	/* the second blkdev flush that __fsync_super() requires */
	return sync_blockdev(sb->s_bdev);
}
273
/**
 *	generic_shutdown_super	-	common helper for ->kill_sb()
 *	@sb: superblock to kill
 *
 *	generic_shutdown_super() does all fs-independent work on superblock
 *	shutdown.  Typical ->kill_sb() should pick all fs-specific objects
 *	that need destruction out of superblock, call generic_shutdown_super()
 *	and release aforementioned objects.  Note: dentries and inodes _are_
 *	taken care of and do not need specific handling.
 *
 *	Upon calling this function, the filesystem may no longer alter or
 *	rearrange the set of dentries belonging to this super_block, nor may it
 *	change the attachments of dentries to inodes.
 */
void generic_shutdown_super(struct super_block *sb)
{
	const struct super_operations *sop = sb->s_op;

	if (sb->s_root) {
		/* flush dentries/data first, while the fs is still alive */
		shrink_dcache_for_umount(sb);
		fsync_super(sb);
		lock_super(sb);
		sb->s_flags &= ~MS_ACTIVE;
		/* bad name - it should be evict_inodes() */
		invalidate_inodes(sb);
		lock_kernel();

		if (sop->write_super && sb->s_dirt)
			sop->write_super(sb);
		if (sop->put_super)
			sop->put_super(sb);

		/* Forget any remaining inodes */
		if (invalidate_inodes(sb)) {
			printk("VFS: Busy inodes after unmount of %s. "
			   "Self-destruct in 5 seconds.  Have a nice day...\n",
			   sb->s_id);
		}

		unlock_kernel();
		unlock_super(sb);
	}
	spin_lock(&sb_lock);
	/* should be initialized for __put_super_and_need_restart() */
	list_del_init(&sb->s_list);
	list_del(&sb->s_instances);
	spin_unlock(&sb_lock);
	up_write(&sb->s_umount);
}
323
324EXPORT_SYMBOL(generic_shutdown_super);
325
/**
 *	sget	-	find or create a superblock
 *	@type:	filesystem type superblock should belong to
 *	@test:	comparison callback (may be NULL: always create a new sb)
 *	@set:	setup callback, run under sb_lock
 *	@data:	argument to each of them
 *
 *	Locking: sb_lock is taken at "retry" and is released on every path
 *	that jumps back there -- inside grab_super(), which is annotated
 *	__releases(sb_lock), or by the explicit spin_unlock() before
 *	alloc_super().  Static analyzers that do not follow the sparse
 *	__releases() annotation report a "double lock" on the spin_lock()
 *	below; that report is a false positive.
 */
struct super_block *sget(struct file_system_type *type,
			int (*test)(struct super_block *,void *),
			int (*set)(struct super_block *,void *),
			void *data)
{
	struct super_block *s = NULL;
	struct super_block *old;
	int err;

retry:
	spin_lock(&sb_lock);
	if (test) {
		list_for_each_entry(old, &type->fs_supers, s_instances) {
			if (!test(old, data))
				continue;
			/* grab_super() drops sb_lock on both outcomes */
			if (!grab_super(old))
				goto retry;
			if (s)
				destroy_super(s);	/* lost the race; toss ours */
			return old;
		}
	}
	if (!s) {
		/* no match: allocate outside the spinlock, then rescan */
		spin_unlock(&sb_lock);
		s = alloc_super(type);
		if (!s)
			return ERR_PTR(-ENOMEM);
		goto retry;
	}

	err = set(s, data);
	if (err) {
		spin_unlock(&sb_lock);
		destroy_super(s);
		return ERR_PTR(err);
	}
	s->s_type = type;
	strlcpy(s->s_id, type->name, sizeof(s->s_id));
	/* publish: visible on both global and per-type lists from here on */
	list_add_tail(&s->s_list, &super_blocks);
	list_add(&s->s_instances, &type->fs_supers);
	spin_unlock(&sb_lock);
	get_filesystem(type);
	return s;
}
377
378EXPORT_SYMBOL(sget);
379
/* Release a temporary reference obtained from e.g. get_super() or
 * user_get_super(): drop the read lock on s_umount, then the refcount. */
void drop_super(struct super_block *sb)
{
	up_read(&sb->s_umount);
	put_super(sb);
}
385
386EXPORT_SYMBOL(drop_super);
387
388static inline void write_super(struct super_block *sb)
389{
390        lock_super(sb);
391        if (sb->s_root && sb->s_dirt)
392                if (sb->s_op->write_super)
393                        sb->s_op->write_super(sb);
394        unlock_super(sb);
395}
396
/*
 * Note: check the dirty flag before waiting, so we don't
 * hold up the sync while mounting a device. (The newly
 * mounted device won't need syncing.)
 *
 * Walks all superblocks and calls write_super() on the dirty ones.
 * Each sb is pinned via s_count before sb_lock is dropped so it stays
 * alive while we sleep on s_umount.
 */
void sync_supers(void)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
restart:
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (sb->s_dirt) {
			sb->s_count++;		/* pin sb across the unlock */
			spin_unlock(&sb_lock);
			down_read(&sb->s_umount);
			write_super(sb);
			up_read(&sb->s_umount);
			spin_lock(&sb_lock);
			/* if sb left the list, our iterator is invalid */
			if (__put_super_and_need_restart(sb))
				goto restart;
		}
	}
	spin_unlock(&sb_lock);
}
422
/*
 * Call the ->sync_fs super_op against all filesystems which are r/w and
 * which implement it.
 *
 * This operation is careful to avoid the livelock which could easily happen
 * if two or more filesystems are being continuously dirtied.  s_need_sync_fs
 * is used only here.  We set it against all filesystems and then clear it as
 * we sync them.  So redirtied filesystems are skipped.
 *
 * But if process A is currently running sync_filesystems and then process B
 * calls sync_filesystems as well, process B will set all the s_need_sync_fs
 * flags again, which will cause process A to resync everything.  Fix that with
 * a local mutex.
 *
 * (Fabian) Avoid sync_fs with clean fs & wait mode 0
 */
void sync_filesystems(int wait)
{
	struct super_block *sb;
	static DEFINE_MUTEX(mutex);

	mutex_lock(&mutex);		/* Could be down_interruptible */
	spin_lock(&sb_lock);
	/* pass 1: mark every candidate, entirely under sb_lock */
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (!sb->s_op->sync_fs)
			continue;
		if (sb->s_flags & MS_RDONLY)
			continue;
		sb->s_need_sync_fs = 1;
	}

restart:
	/* pass 2: sync the marked ones, dropping sb_lock while sleeping */
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (!sb->s_need_sync_fs)
			continue;
		sb->s_need_sync_fs = 0;
		if (sb->s_flags & MS_RDONLY)
			continue;	/* hm.  Was remounted r/o meanwhile */
		sb->s_count++;		/* pin sb across the unlock */
		spin_unlock(&sb_lock);
		down_read(&sb->s_umount);
		if (sb->s_root && (wait || sb->s_dirt))
			sb->s_op->sync_fs(sb, wait);
		up_read(&sb->s_umount);
		/* restart only when sb is no longer on the list */
		spin_lock(&sb_lock);
		if (__put_super_and_need_restart(sb))
			goto restart;
	}
	spin_unlock(&sb_lock);
	mutex_unlock(&mutex);
}
475
/**
 *	get_super - get the superblock of a device
 *	@bdev: device to get the superblock for
 *
 *	Scans the superblock list and finds the superblock of the file system
 *	mounted on the device given. %NULL is returned if no match is found.
 *	On success the sb is returned with an elevated s_count and s_umount
 *	held for reading; release both with drop_super().
 */
struct super_block * get_super(struct block_device *bdev)
{
	struct super_block *sb;

	if (!bdev)
		return NULL;

	spin_lock(&sb_lock);
rescan:
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (sb->s_bdev == bdev) {
			sb->s_count++;		/* pin sb across the unlock */
			spin_unlock(&sb_lock);
			down_read(&sb->s_umount);
			if (sb->s_root)
				return sb;
			/* sb was shut down while we slept; drop it */
			up_read(&sb->s_umount);
			/* restart only when sb is no longer on the list */
			spin_lock(&sb_lock);
			if (__put_super_and_need_restart(sb))
				goto rescan;
		}
	}
	spin_unlock(&sb_lock);
	return NULL;
}
510
511EXPORT_SYMBOL(get_super);
512 
/*
 * Like get_super(), but looks the superblock up by device number
 * instead of by block device.  Same return and locking contract:
 * on success s_count is elevated and s_umount is held for reading.
 */
struct super_block * user_get_super(dev_t dev)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
rescan:
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (sb->s_dev ==  dev) {
			sb->s_count++;		/* pin sb across the unlock */
			spin_unlock(&sb_lock);
			down_read(&sb->s_umount);
			if (sb->s_root)
				return sb;
			/* sb was shut down while we slept; drop it */
			up_read(&sb->s_umount);
			/* restart only when sb is no longer on the list */
			spin_lock(&sb_lock);
			if (__put_super_and_need_restart(sb))
				goto rescan;
		}
	}
	spin_unlock(&sb_lock);
	return NULL;
}
536
537asmlinkage long sys_ustat(unsigned dev, struct ustat __user * ubuf)
538{
539        struct super_block *s;
540        struct ustat tmp;
541        struct kstatfs sbuf;
542        int err = -EINVAL;
543
544        s = user_get_super(new_decode_dev(dev));
545        if (s == NULL)
546                goto out;
547        err = vfs_statfs(s->s_root, &sbuf);
548        drop_super(s);
549        if (err)
550                goto out;
551
552        memset(&tmp,0,sizeof(struct ustat));
553        tmp.f_tfree = sbuf.f_bfree;
554        tmp.f_tinode = sbuf.f_ffree;
555
556        err = copy_to_user(ubuf,&tmp,sizeof(struct ustat)) ? -EFAULT : 0;
557out:
558        return err;
559}
560
/**
 *	mark_files_ro - mark all files read-only
 *	@sb: superblock in question
 *
 *	All files are marked read-only.  We don't care about pending
 *	delete files so this should be used in 'force' mode only.
 */
static void mark_files_ro(struct super_block *sb)
{
	struct file *f;

retry:
	file_list_lock();
	list_for_each_entry(f, &sb->s_files, f_u.fu_list) {
		struct vfsmount *mnt;
		if (!S_ISREG(f->f_path.dentry->d_inode->i_mode))
		       continue;
		if (!file_count(f))
			continue;
		if (!(f->f_mode & FMODE_WRITE))
			continue;
		f->f_mode &= ~FMODE_WRITE;
		if (file_check_writeable(f) != 0)
			continue;
		file_release_write(f);
		mnt = mntget(f->f_path.mnt);	/* pin mnt before unlocking */
		file_list_unlock();
		/*
		 * This can sleep, so we can't hold
		 * the file_list_lock() spinlock.
		 */
		mnt_drop_write(mnt);
		mntput(mnt);
		/* the list may have changed while unlocked; rescan from
		 * the start (already-converted files are skipped above) */
		goto retry;
	}
	file_list_unlock();
}
599
/**
 *	do_remount_sb - asks filesystem to change mount options.
 *	@sb:	superblock in question
 *	@flags:	numeric part of options
 *	@data:	the rest of options
 *	@force:	whether or not to force the change
 *
 *	Alters the mount options of a mounted file system.  Returns 0 on
 *	success or a negative errno (-EACCES, -EBUSY, or whatever
 *	->remount_fs() reported).
 */
int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
{
	int retval;
	int remount_rw;

#ifdef CONFIG_BLOCK
	/* can't go read/write on read-only hardware */
	if (!(flags & MS_RDONLY) && bdev_read_only(sb->s_bdev))
		return -EACCES;
#endif
	if (flags & MS_RDONLY)
		acct_auto_close(sb);
	shrink_dcache_sb(sb);
	fsync_super(sb);

	/* If we are remounting RDONLY and current sb is read/write,
	   make sure there are no rw files opened */
	if ((flags & MS_RDONLY) && !(sb->s_flags & MS_RDONLY)) {
		if (force)
			mark_files_ro(sb);
		else if (!fs_may_remount_ro(sb))
			return -EBUSY;
		retval = DQUOT_OFF(sb, 1);
		if (retval < 0 && retval != -ENOSYS)
			return -EBUSY;
	}
	/* decided before ->remount_fs(), which may rewrite *flags */
	remount_rw = !(flags & MS_RDONLY) && (sb->s_flags & MS_RDONLY);

	if (sb->s_op->remount_fs) {
		lock_super(sb);
		retval = sb->s_op->remount_fs(sb, &flags, data);
		unlock_super(sb);
		if (retval)
			return retval;
	}
	/* only MS_RMT_MASK bits may be changed by a remount */
	sb->s_flags = (sb->s_flags & ~MS_RMT_MASK) | (flags & MS_RMT_MASK);
	if (remount_rw)
		DQUOT_ON_REMOUNT(sb);
	return 0;
}
648
649static void do_emergency_remount(unsigned long foo)
650{
651        struct super_block *sb;
652
653        spin_lock(&sb_lock);
654        list_for_each_entry(sb, &super_blocks, s_list) {
655                sb->s_count++;
656                spin_unlock(&sb_lock);
657                down_read(&sb->s_umount);
658                if (sb->s_root && sb->s_bdev && !(sb->s_flags & MS_RDONLY)) {
659                        /*
660                         * ->remount_fs needs lock_kernel().
661                         *
662                         * What lock protects sb->s_flags??
663                         */
664                        lock_kernel();
665                        do_remount_sb(sb, MS_RDONLY, NULL, 1);
666                        unlock_kernel();
667                }
668                drop_super(sb);
669                spin_lock(&sb_lock);
670        }
671        spin_unlock(&sb_lock);
672        printk("Emergency Remount complete\n");
673}
674
/*
 * Schedule an asynchronous read-only remount of all filesystems via
 * pdflush, so the caller itself never blocks on filesystem locks.
 */
void emergency_remount(void)
{
	pdflush_operation(do_emergency_remount, 0);
}
679
/*
 * Unnamed block devices are dummy devices used by virtual
 * filesystems which don't use real block-devices.  -- jrs
 */

/* IDA handing out minor numbers for anonymous (major 0) devices */
static DEFINE_IDA(unnamed_dev_ida);
static DEFINE_SPINLOCK(unnamed_dev_lock);/* protects the above */
687
/*
 * sget() "set" callback: give the superblock an anonymous (major 0)
 * device number.  Returns 0 on success or a negative errno.
 */
int set_anon_super(struct super_block *s, void *data)
{
	int dev;
	int error;

 retry:
	/* GFP_ATOMIC: sget() calls us while holding sb_lock */
	if (ida_pre_get(&unnamed_dev_ida, GFP_ATOMIC) == 0)
		return -ENOMEM;
	spin_lock(&unnamed_dev_lock);
	error = ida_get_new(&unnamed_dev_ida, &dev);
	spin_unlock(&unnamed_dev_lock);
	if (error == -EAGAIN)
		/* We raced and lost with another CPU. */
		goto retry;
	else if (error)
		return -EAGAIN;

	/* anonymous devices only get MINORBITS worth of minors */
	if ((dev & MAX_ID_MASK) == (1 << MINORBITS)) {
		spin_lock(&unnamed_dev_lock);
		ida_remove(&unnamed_dev_ida, dev);
		spin_unlock(&unnamed_dev_lock);
		return -EMFILE;
	}
	s->s_dev = MKDEV(0, dev & MINORMASK);
	return 0;
}
714
715EXPORT_SYMBOL(set_anon_super);
716
717void kill_anon_super(struct super_block *sb)
718{
719        int slot = MINOR(sb->s_dev);
720
721        generic_shutdown_super(sb);
722        spin_lock(&unnamed_dev_lock);
723        ida_remove(&unnamed_dev_ida, slot);
724        spin_unlock(&unnamed_dev_lock);
725}
726
727EXPORT_SYMBOL(kill_anon_super);
728
729void kill_litter_super(struct super_block *sb)
730{
731        if (sb->s_root)
732                d_genocide(sb->s_root);
733        kill_anon_super(sb);
734}
735
736EXPORT_SYMBOL(kill_litter_super);
737
738#ifdef CONFIG_BLOCK
/* sget() "set" callback: bind the superblock to the block device
 * passed as @data and adopt its device number.  Never fails. */
static int set_bdev_super(struct super_block *s, void *data)
{
	s->s_bdev = data;
	s->s_dev = s->s_bdev->bd_dev;
	return 0;
}
745
746static int test_bdev_super(struct super_block *s, void *data)
747{
748        return (void *)s->s_bdev == data;
749}
750
/*
 * Common ->get_sb() helper for block-device-backed filesystems: open
 * the device exclusively, find or create its superblock via sget(),
 * and fill a fresh one with @fill_super.  Returns 0 on success with
 * @mnt set up, or a negative errno.
 */
int get_sb_bdev(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data,
	int (*fill_super)(struct super_block *, void *, int),
	struct vfsmount *mnt)
{
	struct block_device *bdev;
	struct super_block *s;
	fmode_t mode = FMODE_READ;
	int error = 0;

	if (!(flags & MS_RDONLY))
		mode |= FMODE_WRITE;

	bdev = open_bdev_exclusive(dev_name, mode, fs_type);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	/*
	 * once the super is inserted into the list by sget, s_umount
	 * will protect the lockfs code from trying to start a snapshot
	 * while we are mounting
	 */
	down(&bdev->bd_mount_sem);
	s = sget(fs_type, test_bdev_super, set_bdev_super, bdev);
	up(&bdev->bd_mount_sem);
	if (IS_ERR(s))
		goto error_s;

	if (s->s_root) {
		/* device already mounted: ro/rw flags must agree */
		if ((flags ^ s->s_flags) & MS_RDONLY) {
			up_write(&s->s_umount);
			deactivate_super(s);
			error = -EBUSY;
			goto error_bdev;
		}

		/* the existing sb already has the device open; drop ours */
		close_bdev_exclusive(bdev, mode);
	} else {
		char b[BDEVNAME_SIZE];

		/* fresh superblock from sget(): set it up ourselves */
		s->s_flags = flags;
		s->s_mode = mode;
		strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id));
		sb_set_blocksize(s, block_size(bdev));
		error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
		if (error) {
			up_write(&s->s_umount);
			deactivate_super(s);
			goto error;
		}

		s->s_flags |= MS_ACTIVE;
	}

	return simple_set_mnt(mnt, s);

error_s:
	error = PTR_ERR(s);
error_bdev:
	close_bdev_exclusive(bdev, mode);
error:
	return error;
}
814
815EXPORT_SYMBOL(get_sb_bdev);
816
/*
 * ->kill_sb() counterpart of get_sb_bdev(): shut the superblock down,
 * flush the underlying device, and release our exclusive open of it.
 */
void kill_block_super(struct super_block *sb)
{
	/* cache these before the shutdown work below */
	struct block_device *bdev = sb->s_bdev;
	fmode_t mode = sb->s_mode;

	generic_shutdown_super(sb);
	sync_blockdev(bdev);
	close_bdev_exclusive(bdev, mode);
}
826
827EXPORT_SYMBOL(kill_block_super);
828#endif
829
830int get_sb_nodev(struct file_system_type *fs_type,
831        int flags, void *data,
832        int (*fill_super)(struct super_block *, void *, int),
833        struct vfsmount *mnt)
834{
835        int error;
836        struct super_block *s = sget(fs_type, NULL, set_anon_super, NULL);
837
838        if (IS_ERR(s))
839                return PTR_ERR(s);
840
841        s->s_flags = flags;
842
843        error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
844        if (error) {
845                up_write(&s->s_umount);
846                deactivate_super(s);
847                return error;
848        }
849        s->s_flags |= MS_ACTIVE;
850        return simple_set_mnt(mnt, s);
851}
852
853EXPORT_SYMBOL(get_sb_nodev);
854
/* sget() "test" callback for single-instance filesystems: any
 * existing superblock of the type matches. */
static int compare_single(struct super_block *s, void *p)
{
	return 1;
}
859
/*
 * ->get_sb() helper for filesystems with exactly one instance: all
 * mounts share one superblock (compare_single() matches any existing
 * one).  The first mount fills it; later mounts just remount it with
 * the new options.
 */
int get_sb_single(struct file_system_type *fs_type,
	int flags, void *data,
	int (*fill_super)(struct super_block *, void *, int),
	struct vfsmount *mnt)
{
	struct super_block *s;
	int error;

	s = sget(fs_type, compare_single, set_anon_super, NULL);
	if (IS_ERR(s))
		return PTR_ERR(s);
	if (!s->s_root) {
		/* freshly allocated: this mount sets it up */
		s->s_flags = flags;
		error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
		if (error) {
			up_write(&s->s_umount);
			deactivate_super(s);
			return error;
		}
		s->s_flags |= MS_ACTIVE;
	}
	do_remount_sb(s, flags, data, 0);
	return simple_set_mnt(mnt, s);
}
884
885EXPORT_SYMBOL(get_sb_single);
886
/*
 * vfs_kern_mount - mount a filesystem type and return the new vfsmount
 * @type:  filesystem type (already pinned by the caller)
 * @flags: mount flags
 * @name:  device name (or type name for nodev filesystems)
 * @data:  mount options
 *
 * Returns the vfsmount with s_umount released, or an ERR_PTR.  Errors
 * unwind through the goto ladder at the bottom.
 */
struct vfsmount *
vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void *data)
{
	struct vfsmount *mnt;
	char *secdata = NULL;
	int error;

	if (!type)
		return ERR_PTR(-ENODEV);

	error = -ENOMEM;
	mnt = alloc_vfsmnt(name);
	if (!mnt)
		goto out;

	/* text-form mount data gets copied for the security module */
	if (data && !(type->fs_flags & FS_BINARY_MOUNTDATA)) {
		secdata = alloc_secdata();
		if (!secdata)
			goto out_mnt;

		error = security_sb_copy_data(data, secdata);
		if (error)
			goto out_free_secdata;
	}

	error = type->get_sb(type, flags, name, data, mnt);
	if (error < 0)
		goto out_free_secdata;
	BUG_ON(!mnt->mnt_sb);

	error = security_sb_kern_mount(mnt->mnt_sb, secdata);
	if (error)
		goto out_sb;

	/* not attached anywhere yet: the mount is its own parent */
	mnt->mnt_mountpoint = mnt->mnt_root;
	mnt->mnt_parent = mnt;
	up_write(&mnt->mnt_sb->s_umount);
	free_secdata(secdata);
	return mnt;
out_sb:
	dput(mnt->mnt_root);
	up_write(&mnt->mnt_sb->s_umount);
	deactivate_super(mnt->mnt_sb);
out_free_secdata:
	free_secdata(secdata);
out_mnt:
	free_vfsmnt(mnt);
out:
	return ERR_PTR(error);
}
937
938EXPORT_SYMBOL_GPL(vfs_kern_mount);
939
/*
 * Record the "subtype" part of a "type.subtype" filesystem string in
 * mnt->mnt_sb->s_subtype.  On failure, consumes @mnt (mntput) and
 * returns an ERR_PTR instead.
 */
static struct vfsmount *fs_set_subtype(struct vfsmount *mnt, const char *fstype)
{
	int err;
	const char *subtype = strchr(fstype, '.');
	if (subtype) {
		subtype++;
		err = -EINVAL;
		if (!subtype[0])
			goto err;	/* trailing dot: empty subtype */
	} else
		subtype = "";

	mnt->mnt_sb->s_subtype = kstrdup(subtype, GFP_KERNEL);
	err = -ENOMEM;
	if (!mnt->mnt_sb->s_subtype)
		goto err;
	return mnt;

 err:
	mntput(mnt);
	return ERR_PTR(err);
}
962
/*
 * Look the filesystem type up by name and mount it.  Returns the new
 * vfsmount or an ERR_PTR (-ENODEV for an unknown type).
 */
struct vfsmount *
do_kern_mount(const char *fstype, int flags, const char *name, void *data)
{
	struct file_system_type *type = get_fs_type(fstype);
	struct vfsmount *mnt;
	if (!type)
		return ERR_PTR(-ENODEV);
	mnt = vfs_kern_mount(type, flags, name, data);
	/* "type.subtype" mounts record the subtype unless ->get_sb did */
	if (!IS_ERR(mnt) && (type->fs_flags & FS_HAS_SUBTYPE) &&
	    !mnt->mnt_sb->s_subtype)
		mnt = fs_set_subtype(mnt, fstype);
	put_filesystem(type);
	return mnt;
}
977EXPORT_SYMBOL_GPL(do_kern_mount);
978
/* Mount @type for kernel-internal use (MS_KERNMOUNT), named after the
 * type itself rather than a device. */
struct vfsmount *kern_mount_data(struct file_system_type *type, void *data)
{
	return vfs_kern_mount(type, MS_KERNMOUNT, type->name, data);
}
983
984EXPORT_SYMBOL_GPL(kern_mount_data);