/*
 * linux/fs/inode.c
 *
 * (C) 1997 Linus Torvalds
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/dcache.h>
#include <linux/init.h>
#include <linux/quotaops.h>
#include <linux/slab.h>
#include <linux/writeback.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <linux/hash.h>
#include <linux/swap.h>
#include <linux/security.h>
#include <linux/pagemap.h>
#include <linux/cdev.h>
#include <linux/bootmem.h>
#include <linux/inotify.h>
#include <linux/mount.h>

/*
 * This is needed for the following functions:
 *  - inode_has_buffers
 *  - invalidate_inode_buffers
 *  - releasepage
 *
 * FIXME: remove all knowledge of the buffer layer from this file
 */
#include <linux/buffer_head.h>

/*
 * Inode hashing.
 *
 * Inodes are hashed on the (super_block pointer, inode number) pair;
 * see hash() below.  The table is sized at boot time by inode_init()
 * or inode_init_early() via alloc_large_system_hash(), which fills in
 * i_hash_shift and i_hash_mask.
 */

#define I_HASHBITS	i_hash_shift
#define I_HASHMASK	i_hash_mask

static unsigned int i_hash_mask __read_mostly;
static unsigned int i_hash_shift __read_mostly;

/*
 * Each inode can be on two separate lists. One is
 * the hash list of the inode, used for lookups. The
 * other linked list is the "type" list:
 *  "in_use" - valid inode, i_count > 0, i_nlink > 0
 *  "dirty"  - as "in_use" but also dirty
 *  "unused" - valid inode, i_count = 0
 *
 * A "dirty" list is maintained for each super block,
 * only for locking purposes.
 */

LIST_HEAD(inode_in_use);
LIST_HEAD(inode_unused);
static struct hlist_head *inode_hashtable __read_mostly;

/*
 * A simple spinlock to protect the list manipulations.
 *
 * NOTE! You also have to own the lock if you change
 * the i_state of an inode while it is in use..
 */
DEFINE_SPINLOCK(inode_lock);

/*
 * iprune_mutex serializes the icache shrinking path (prune_icache)
 * against umount (invalidate_inodes), so that an inode taken off the
 * unused list for pruning cannot be torn out from under an in-progress
 * unmount before it has been fully disposed of.
 */
static DEFINE_MUTEX(iprune_mutex);

/*
 * Statistics gathering..
 */
struct inodes_stat_t inodes_stat;

static struct kmem_cache *inode_cachep __read_mostly;

static void wake_up_inode(struct inode *inode)
{
	/*
	 * Prevent speculative execution through spin_unlock(&inode_lock);
	 */
	smp_mb();
	wake_up_bit(&inode->i_state, __I_LOCK);
}

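/*
 * alloc_inode - allocate and minimally initialize an inode for @sb.
 * Uses the superblock's ->alloc_inode() if provided, otherwise the
 * generic inode_cachep slab.  Returns NULL on allocation or security
 * failure.
 */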
static struct inode *alloc_inode(struct super_block *sb)
{
	static const struct address_space_operations empty_aops;
	static struct inode_operations empty_iops;
	static const struct file_operations empty_fops;
	struct inode *inode;

	if (sb->s_op->alloc_inode)
		inode = sb->s_op->alloc_inode(sb);
	else
		inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL);

	if (inode) {
		struct address_space * const mapping = &inode->i_data;

		inode->i_sb = sb;
		inode->i_blkbits = sb->s_blocksize_bits;
		inode->i_flags = 0;
		atomic_set(&inode->i_count, 1);
		inode->i_op = &empty_iops;
		inode->i_fop = &empty_fops;
		inode->i_nlink = 1;
		atomic_set(&inode->i_writecount, 0);
		inode->i_size = 0;
		inode->i_blocks = 0;
		inode->i_bytes = 0;
		inode->i_generation = 0;
#ifdef CONFIG_QUOTA
		memset(&inode->i_dquot, 0, sizeof(inode->i_dquot));
#endif
		inode->i_pipe = NULL;
		inode->i_bdev = NULL;
		inode->i_cdev = NULL;
		inode->i_rdev = 0;
		inode->dirtied_when = 0;
		if (security_inode_alloc(inode)) {
			if (inode->i_sb->s_op->destroy_inode)
				inode->i_sb->s_op->destroy_inode(inode);
			else
				kmem_cache_free(inode_cachep, inode);
			return NULL;
		}

		spin_lock_init(&inode->i_lock);
		lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);

		mutex_init(&inode->i_mutex);
		lockdep_set_class(&inode->i_mutex, &sb->s_type->i_mutex_key);

		init_rwsem(&inode->i_alloc_sem);
		lockdep_set_class(&inode->i_alloc_sem, &sb->s_type->i_alloc_sem_key);

		mapping->a_ops = &empty_aops;
		mapping->host = inode;
		mapping->flags = 0;
		mapping_set_gfp_mask(mapping, GFP_HIGHUSER_PAGECACHE);
		mapping->assoc_mapping = NULL;
		mapping->backing_dev_info = &default_backing_dev_info;
		mapping->writeback_index = 0;

		/*
		 * If the block_device provides a backing_dev_info for client
		 * inodes then use that.  Otherwise the inode shares the bdev's
		 * backing_dev_info.
		 */
		if (sb->s_bdev) {
			struct backing_dev_info *bdi;

			bdi = sb->s_bdev->bd_inode_backing_dev_info;
			if (!bdi)
				bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info;
			mapping->backing_dev_info = bdi;
		}
		inode->i_private = NULL;
		inode->i_mapping = mapping;
	}
	return inode;
}

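/*
 * Free an inode, undoing alloc_inode(): via the superblock's
 * ->destroy_inode() if provided, otherwise back to the inode_cachep
 * slab.
 */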
void destroy_inode(struct inode *inode)
{
	BUG_ON(inode_has_buffers(inode));
	security_inode_free(inode);
	if (inode->i_sb->s_op->destroy_inode)
		inode->i_sb->s_op->destroy_inode(inode);
	else
		kmem_cache_free(inode_cachep, inode);
}

/*
 * These are initializations that only need to be done
 * once, because the fields are idempotent across use
 * of the inode, so let the slab cache be aware of that.
 */
void inode_init_once(struct inode *inode)
{
	memset(inode, 0, sizeof(*inode));
	INIT_HLIST_NODE(&inode->i_hash);
	INIT_LIST_HEAD(&inode->i_dentry);
	INIT_LIST_HEAD(&inode->i_devices);
	INIT_RADIX_TREE(&inode->i_data.page_tree, GFP_ATOMIC);
	spin_lock_init(&inode->i_data.tree_lock);
	spin_lock_init(&inode->i_data.i_mmap_lock);
	INIT_LIST_HEAD(&inode->i_data.private_list);
	spin_lock_init(&inode->i_data.private_lock);
	INIT_RAW_PRIO_TREE_ROOT(&inode->i_data.i_mmap);
	INIT_LIST_HEAD(&inode->i_data.i_mmap_nonlinear);
	i_size_ordered_init(inode);
#ifdef CONFIG_INOTIFY
	INIT_LIST_HEAD(&inode->inotify_watches);
	mutex_init(&inode->inotify_mutex);
#endif
}

EXPORT_SYMBOL(inode_init_once);

static void init_once(void *foo)
{
	struct inode *inode = (struct inode *) foo;

	inode_init_once(inode);
}

/*
 * inode_lock must be held
 */
void __iget(struct inode *inode)
{
	if (atomic_read(&inode->i_count)) {
		atomic_inc(&inode->i_count);
		return;
	}
	/*
	 * The inode was unreferenced: take it off the unused list
	 * (unless writeback still has it on a dirty/sync list).
	 */
	atomic_inc(&inode->i_count);
	if (!(inode->i_state & (I_DIRTY|I_SYNC)))
		list_move(&inode->i_list, &inode_in_use);
	inodes_stat.nr_unused--;
}

/**
 *	clear_inode - clear an inode
 *	@inode: inode to clear
 *
 *	This is called by the filesystem to tell us
 *	that the inode is no longer useful. We just
 *	terminate it with extreme prejudice.
 */
void clear_inode(struct inode *inode)
{
	might_sleep();
	invalidate_inode_buffers(inode);

	BUG_ON(inode->i_data.nrpages);
	BUG_ON(!(inode->i_state & I_FREEING));
	BUG_ON(inode->i_state & I_CLEAR);
	inode_sync_wait(inode);
	DQUOT_DROP(inode);
	if (inode->i_sb->s_op->clear_inode)
		inode->i_sb->s_op->clear_inode(inode);
	if (S_ISBLK(inode->i_mode) && inode->i_bdev)
		bd_forget(inode);
	if (S_ISCHR(inode->i_mode) && inode->i_cdev)
		cd_forget(inode);
	inode->i_state = I_CLEAR;
}

EXPORT_SYMBOL(clear_inode);

/*
 * dispose_list - dispose of the contents of a local list
 * @head: the head of the list to free
 *
 * Dispose-list gets a local list with local inodes in it, so it
 * doesn't need to worry about list corruption and SMP locks.
 */
static void dispose_list(struct list_head *head)
{
	int nr_disposed = 0;

	while (!list_empty(head)) {
		struct inode *inode;

		inode = list_first_entry(head, struct inode, i_list);
		list_del(&inode->i_list);

		if (inode->i_data.nrpages)
			truncate_inode_pages(&inode->i_data, 0);
		clear_inode(inode);

		spin_lock(&inode_lock);
		hlist_del_init(&inode->i_hash);
		list_del_init(&inode->i_sb_list);
		spin_unlock(&inode_lock);

		wake_up_inode(inode);
		destroy_inode(inode);
		nr_disposed++;
	}
	spin_lock(&inode_lock);
	inodes_stat.nr_inodes -= nr_disposed;
	spin_unlock(&inode_lock);
}

/*
 * Invalidate all inodes for a device.
 */
static int invalidate_list(struct list_head *head, struct list_head *dispose)
{
	struct list_head *next;
	int busy = 0, count = 0;

	next = head->next;
	for (;;) {
		struct list_head *tmp = next;
		struct inode *inode;

		/*
		 * We can reschedule here without worrying about the list's
		 * consistency because the per-sb list of inodes must not
		 * change during umount anymore, and because iprune_mutex keeps
		 * shrink_icache_memory() away.
		 */
		cond_resched_lock(&inode_lock);

		next = next->next;
		if (tmp == head)
			break;
		inode = list_entry(tmp, struct inode, i_sb_list);
		invalidate_inode_buffers(inode);
		if (!atomic_read(&inode->i_count)) {
			list_move(&inode->i_list, dispose);
			inode->i_state |= I_FREEING;
			count++;
			continue;
		}
		busy = 1;
	}
	/* only unused inodes may be cached with i_count zero */
	inodes_stat.nr_unused -= count;
	return busy;
}

/**
 *	invalidate_inodes	- discard the inodes on a device
 *	@sb: superblock
 *
 *	Discard all of the inodes for a given superblock. If the discard
 *	fails because there are busy inodes then a non zero value is returned.
 *	If the discard is successful all the inodes have been discarded.
 */
int invalidate_inodes(struct super_block *sb)
{
	int busy;
	LIST_HEAD(throw_away);

	mutex_lock(&iprune_mutex);
	spin_lock(&inode_lock);
	inotify_unmount_inodes(&sb->s_inodes);
	busy = invalidate_list(&sb->s_inodes, &throw_away);
	spin_unlock(&inode_lock);

	dispose_list(&throw_away);
	mutex_unlock(&iprune_mutex);

	return busy;
}

EXPORT_SYMBOL(invalidate_inodes);

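/*
 * Can this inode be freed right now?  Only if nothing holds it: no
 * pending state, no attached buffers, no references, no pagecache.
 * Called under inode_lock.
 */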
static int can_unuse(struct inode *inode)
{
	if (inode->i_state)
		return 0;
	if (inode_has_buffers(inode))
		return 0;
	if (atomic_read(&inode->i_count))
		return 0;
	if (inode->i_data.nrpages)
		return 0;
	return 1;
}

/*
 * Scan `nr_to_scan' inodes on the unused list for freeable ones. They are
 * moved to a temporary list and then are freed outside inode_lock by
 * dispose_list().
 *
 * Any inodes which are pinned purely because of attached pagecache have their
 * pagecache removed.  We expect the final iput() on that inode to add it to
 * the front of the inode_unused list.  So look for it there and if the
 * inode is still freeable, proceed.
 *
 * If the inode has metadata buffers attached to mapping->private_list then
 * try to remove them.
 */
static void prune_icache(int nr_to_scan)
{
	LIST_HEAD(freeable);
	int nr_pruned = 0;
	int nr_scanned;
	unsigned long reap = 0;

	mutex_lock(&iprune_mutex);
	spin_lock(&inode_lock);
	for (nr_scanned = 0; nr_scanned < nr_to_scan; nr_scanned++) {
		struct inode *inode;

		if (list_empty(&inode_unused))
			break;

		inode = list_entry(inode_unused.prev, struct inode, i_list);

		if (inode->i_state || atomic_read(&inode->i_count)) {
			list_move(&inode->i_list, &inode_unused);
			continue;
		}
		if (inode_has_buffers(inode) || inode->i_data.nrpages) {
			__iget(inode);
			spin_unlock(&inode_lock);
			if (remove_inode_buffers(inode))
				reap += invalidate_mapping_pages(&inode->i_data,
								0, -1);
			iput(inode);
			spin_lock(&inode_lock);

			if (inode != list_entry(inode_unused.next,
						struct inode, i_list))
				continue;	/* wrong inode or list_empty */
			if (!can_unuse(inode))
				continue;
		}
		list_move(&inode->i_list, &freeable);
		inode->i_state |= I_FREEING;
		nr_pruned++;
	}
	inodes_stat.nr_unused -= nr_pruned;
	if (current_is_kswapd())
		__count_vm_events(KSWAPD_INODESTEAL, reap);
	else
		__count_vm_events(PGINODESTEAL, reap);
	spin_unlock(&inode_lock);

	dispose_list(&freeable);
	mutex_unlock(&iprune_mutex);
}

/*
 * shrink_icache_memory() will attempt to reclaim some unused inodes.  Here,
 * "unused" means that no dentries are referring to the inodes: the files are
 * not open and the dcache references to those inodes have already been
 * reclaimed.
 *
 * This function is passed the number of inodes to scan, and it returns the
 * total number of remaining possibly-reclaimable inodes.
 */
static int shrink_icache_memory(int nr, gfp_t gfp_mask)
{
	if (nr) {
		/*
		 * Nasty deadlock avoidance.  We may hold various FS locks,
		 * and we don't want to recurse into the FS that called us
		 * in clear_inode() and descend into the address_space code.
		 */
		if (!(gfp_mask & __GFP_FS))
			return -1;
		prune_icache(nr);
	}
	return (inodes_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
}

static struct shrinker icache_shrinker = {
	.shrink = shrink_icache_memory,
	.seeks = DEFAULT_SEEKS,
};

static void __wait_on_freeing_inode(struct inode *inode);

/*
 * Called with the inode lock held.
 * NOTE: we are not increasing the inode-refcount, you must call __iget()
 * by hand after calling find_inode now! This simplifies iunique and won't
 * add any additional branch in the common code.
 */
static struct inode *find_inode(struct super_block *sb,
				struct hlist_head *head,
				int (*test)(struct inode *, void *),
				void *data)
{
	struct hlist_node *node;
	struct inode *inode = NULL;

repeat:
	hlist_for_each_entry(inode, node, head, i_hash) {
		if (inode->i_sb != sb)
			continue;
		if (!test(inode, data))
			continue;
		if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
		break;
	}
	return node ? inode : NULL;
}

/*
 * find_inode_fast is the fast path version of find_inode, see the comment at
 * iget_locked for details.
 */
static struct inode *find_inode_fast(struct super_block *sb,
				     struct hlist_head *head,
				     unsigned long ino)
{
	struct hlist_node *node;
	struct inode *inode = NULL;

repeat:
	hlist_for_each_entry(inode, node, head, i_hash) {
		if (inode->i_ino != ino)
			continue;
		if (inode->i_sb != sb)
			continue;
		if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
		break;
	}
	return node ? inode : NULL;
}

/**
 *	new_inode 	- obtain an inode
 *	@sb: superblock
 *
 *	Allocates a new inode for given superblock. The default gfp_mask
 *	for allocations related to inode->i_mapping is GFP_HIGHUSER_PAGECACHE.
 *	If HIGHMEM pages are unsuitable or it is known that pages allocated
 *	for the page cache are not reclaimable or migratable,
 *	mapping_set_gfp_mask() must be called with suitable flags on the
 *	newly created inode's mapping.
 */
struct inode *new_inode(struct super_block *sb)
{
	/*
	 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
	 * error if st_ino won't fit in target struct field. Use 32bit counter
	 * here to attempt to avoid that.
	 */
	static unsigned int last_ino;
	struct inode *inode;

	spin_lock_prefetch(&inode_lock);

	inode = alloc_inode(sb);
	if (inode) {
		spin_lock(&inode_lock);
		inodes_stat.nr_inodes++;
		list_add(&inode->i_list, &inode_in_use);
		list_add(&inode->i_sb_list, &sb->s_inodes);
		inode->i_ino = ++last_ino;
		inode->i_state = 0;
		spin_unlock(&inode_lock);
	}
	return inode;
}

EXPORT_SYMBOL(new_inode);

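/*
 * unlock_new_inode - clear the I_NEW state and wake up any waiters.
 * Called when the inode is fully initialised, so anyone who found it
 * via the hash and is sleeping in wait_on_inode() can proceed.
 */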
void unlock_new_inode(struct inode *inode)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (inode->i_mode & S_IFDIR) {
		struct file_system_type *type = inode->i_sb->s_type;

		/*
		 * ensure nobody is actually holding i_mutex
		 */
		mutex_destroy(&inode->i_mutex);
		mutex_init(&inode->i_mutex);
		lockdep_set_class(&inode->i_mutex, &type->i_mutex_dir_key);
	}
#endif
	/*
	 * This is special!  We do not need the spinlock
	 * when clearing I_LOCK, because we're guaranteed
	 * that nobody else tries to do anything about the
	 * state of the inode when it is locked, as we
	 * just created it (so there can be no old holders
	 * that haven't tested I_LOCK).
	 */
	inode->i_state &= ~(I_LOCK|I_NEW);
	wake_up_inode(inode);
}

EXPORT_SYMBOL(unlock_new_inode);

/*
 * This is called without the inode lock held.. Be careful.
 */
static struct inode *get_new_inode(struct super_block *sb,
				struct hlist_head *head,
				int (*test)(struct inode *, void *),
				int (*set)(struct inode *, void *),
				void *data)
{
	struct inode *inode;

	inode = alloc_inode(sb);
	if (inode) {
		struct inode *old;

		spin_lock(&inode_lock);
		/* We released the lock, so.. */
		old = find_inode(sb, head, test, data);
		if (!old) {
			if (set(inode, data))
				goto set_failed;

			inodes_stat.nr_inodes++;
			list_add(&inode->i_list, &inode_in_use);
			list_add(&inode->i_sb_list, &sb->s_inodes);
			hlist_add_head(&inode->i_hash, head);
			inode->i_state = I_LOCK|I_NEW;
			spin_unlock(&inode_lock);

			/* Return the locked inode with I_NEW set, the
			 * caller is responsible for filling in the contents
			 */
			return inode;
		}

		/*
		 * Uhhuh, somebody else created the same inode under
		 * us. Use the old inode instead of the one we just
		 * allocated.
		 */
		__iget(old);
		spin_unlock(&inode_lock);
		destroy_inode(inode);
		inode = old;
		wait_on_inode(inode);
	}
	return inode;

set_failed:
	spin_unlock(&inode_lock);
	destroy_inode(inode);
	return NULL;
}

/*
 * get_new_inode_fast is the fast path version of get_new_inode, see the
 * comment at iget_locked for details.
 */
static struct inode *get_new_inode_fast(struct super_block *sb,
				struct hlist_head *head, unsigned long ino)
{
	struct inode *inode;

	inode = alloc_inode(sb);
	if (inode) {
		struct inode *old;

		spin_lock(&inode_lock);
		/* We released the lock, so.. */
		old = find_inode_fast(sb, head, ino);
		if (!old) {
			inode->i_ino = ino;
			inodes_stat.nr_inodes++;
			list_add(&inode->i_list, &inode_in_use);
			list_add(&inode->i_sb_list, &sb->s_inodes);
			hlist_add_head(&inode->i_hash, head);
			inode->i_state = I_LOCK|I_NEW;
			spin_unlock(&inode_lock);

			/* Return the locked inode with I_NEW set, the
			 * caller is responsible for filling in the contents
			 */
			return inode;
		}

		/*
		 * Uhhuh, somebody else created the same inode under
		 * us. Use the old inode instead of the one we just
		 * allocated.
		 */
		__iget(old);
		spin_unlock(&inode_lock);
		destroy_inode(inode);
		inode = old;
		wait_on_inode(inode);
	}
	return inode;
}

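/*
 * Hash an (sb, hashval) pair into an index into inode_hashtable.
 * GOLDEN_RATIO_PRIME comes from <linux/hash.h>; I_HASHBITS and
 * I_HASHMASK are filled in at boot from the table size.
 */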
static unsigned long hash(struct super_block *sb, unsigned long hashval)
{
	unsigned long tmp;

	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
			L1_CACHE_BYTES;
	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> I_HASHBITS);
	return tmp & I_HASHMASK;
}

/**
 *	iunique - get a unique inode number
 *	@sb: superblock
 *	@max_reserved: highest reserved inode number
 *
 *	Obtain an inode number that is unique on the system for a given
 *	superblock. This is used by file systems that have no natural
 *	permanent inode numbering system. An inode number is returned that
 *	is higher than the reserved limit but unique.
 *
 *	BUGS:
 *	With a large number of inodes live on the file system this function
 *	currently becomes quite slow.
 */
ino_t iunique(struct super_block *sb, ino_t max_reserved)
{
	/*
	 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
	 * error if st_ino won't fit in target struct field. Use 32bit counter
	 * here to attempt to avoid that.
	 */
	static unsigned int counter;
	struct inode *inode;
	struct hlist_head *head;
	ino_t res;

	spin_lock(&inode_lock);
	do {
		if (counter <= max_reserved)
			counter = max_reserved + 1;
		res = counter++;
		head = inode_hashtable + hash(sb, res);
		inode = find_inode_fast(sb, head, res);
	} while (inode != NULL);
	spin_unlock(&inode_lock);

	return res;
}
EXPORT_SYMBOL(iunique);

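/*
 * igrab - grab a reference on an inode, unless it is being freed.
 * Returns the inode with an incremented reference count, or NULL if
 * the inode is on its way out (I_FREEING/I_WILL_FREE/I_CLEAR).
 */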
struct inode *igrab(struct inode *inode)
{
	spin_lock(&inode_lock);
	if (!(inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)))
		__iget(inode);
	else
		/*
		 * Handle the case where s_op->clear_inode has not been
		 * called yet, and somebody is calling igrab
		 * while the inode is getting freed.
		 */
		inode = NULL;
	spin_unlock(&inode_lock);
	return inode;
}

EXPORT_SYMBOL(igrab);

/**
 * ifind - internal function, you want ilookup5() or iget5().
 * @sb:		super block of file system to search
 * @head:	the head of the list to search
 * @test:	callback used for comparisons between inodes
 * @data:	opaque data pointer to pass to @test
 * @wait:	if true wait for the inode to be unlocked, if false do not
 *
 * ifind() searches for the inode specified by @data in the inode
 * cache. This is a generalized version of ifind_fast() for file systems where
 * the inode number is not sufficient for unique identification of an inode.
 *
 * If the inode is in the cache, the inode is returned with an incremented
 * reference count.
 *
 * Otherwise NULL is returned.
 *
 * Note, @test is called with the inode_lock held, so can't sleep.
 */
static struct inode *ifind(struct super_block *sb,
		struct hlist_head *head, int (*test)(struct inode *, void *),
		void *data, const int wait)
{
	struct inode *inode;

	spin_lock(&inode_lock);
	inode = find_inode(sb, head, test, data);
	if (inode) {
		__iget(inode);
		spin_unlock(&inode_lock);
		if (likely(wait))
			wait_on_inode(inode);
		return inode;
	}
	spin_unlock(&inode_lock);
	return NULL;
}

/**
 * ifind_fast - internal function, you want ilookup() or iget().
 * @sb:		super block of file system to search
 * @head:	head of the list to search
 * @ino:	inode number to search for
 *
 * ifind_fast() searches for the inode @ino in the inode cache. This is for
 * file systems where the inode number is sufficient for unique identification
 * of an inode.
 *
 * If the inode is in the cache, the inode is returned with an incremented
 * reference count.
 *
 * Otherwise NULL is returned.
 */
static struct inode *ifind_fast(struct super_block *sb,
		struct hlist_head *head, unsigned long ino)
{
	struct inode *inode;

	spin_lock(&inode_lock);
	inode = find_inode_fast(sb, head, ino);
	if (inode) {
		__iget(inode);
		spin_unlock(&inode_lock);
		wait_on_inode(inode);
		return inode;
	}
	spin_unlock(&inode_lock);
	return NULL;
}

/**
 * ilookup5_nowait - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @hashval:	hash value (usually inode number) to search for
 * @test:	callback used for comparisons between inodes
 * @data:	opaque data pointer to pass to @test
 *
 * ilookup5_nowait() uses ifind() to search for the inode specified by
 * @hashval and @data in the inode cache. This is a generalized version of
 * ilookup() for file systems where the inode number is not sufficient for
 * unique identification of an inode.
 *
 * If the inode is in the cache, the inode is returned with an incremented
 * reference count.  Note, the inode lock is not waited upon so you have to be
 * very careful what you do with the returned inode.  You probably should be
 * using ilookup5() instead.
 *
 * Otherwise NULL is returned.
 *
 * Note, @test is called with the inode_lock held, so can't sleep.
 */
struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);

	return ifind(sb, head, test, data, 0);
}

EXPORT_SYMBOL(ilookup5_nowait);

/**
 * ilookup5 - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @hashval:	hash value (usually inode number) to search for
 * @test:	callback used for comparisons between inodes
 * @data:	opaque data pointer to pass to @test
 *
 * ilookup5() uses ifind() to search for the inode specified by @hashval and
 * @data in the inode cache. This is a generalized version of ilookup() for
 * file systems where the inode number is not sufficient for unique
 * identification of an inode.
 *
 * If the inode is in the cache, the inode lock is waited upon and the inode is
 * returned with an incremented reference count.
 *
 * Otherwise NULL is returned.
 *
 * Note, @test is called with the inode_lock held, so can't sleep.
 */
struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);

	return ifind(sb, head, test, data, 1);
}

EXPORT_SYMBOL(ilookup5);

/**
 * ilookup - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @ino:	inode number to search for
 *
 * ilookup() uses ifind_fast() to search for the inode @ino in the inode
 * cache. This is for file systems where the inode number is sufficient for
 * unique identification of an inode.
 *
 * If the inode is in the cache, the inode is returned with an incremented
 * reference count.
 *
 * Otherwise NULL is returned.
 */
struct inode *ilookup(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *head = inode_hashtable + hash(sb, ino);

	return ifind_fast(sb, head, ino);
}

EXPORT_SYMBOL(ilookup);

/**
 * iget5_locked - obtain an inode from a mounted file system
 * @sb:		super block of file system
 * @hashval:	hash value (usually inode number) to get
 * @test:	callback used for comparisons between inodes
 * @set:	callback used to initialize a new struct inode
 * @data:	opaque data pointer to pass to @test and @set
 *
 * iget5_locked() uses ifind() to search for the inode specified by @hashval
 * and @data in the inode cache and if present it is returned with an increased
 * reference count. This is a generalized version of iget_locked() for file
 * systems where the inode number is not sufficient for unique identification
 * of an inode.
 *
 * If the inode is not in cache, get_new_inode() is called to allocate a new
 * inode and this is returned locked, hashed, and with the I_NEW flag set. The
 * file system gets to fill it in before unlocking it via unlock_new_inode().
 *
 * Note both @test and @set are called with the inode_lock held, so can't
 * sleep.
 */
struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *),
		int (*set)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode;

	inode = ifind(sb, head, test, data, 1);
	if (inode)
		return inode;
	/*
	 * get_new_inode() will do the right thing, re-trying the search
	 * in case it had to block at any point.
	 */
	return get_new_inode(sb, head, test, set, data);
}

EXPORT_SYMBOL(iget5_locked);

/**
 * iget_locked - obtain an inode from a mounted file system
 * @sb:		super block of file system
 * @ino:	inode number to get
 *
 * iget_locked() uses ifind_fast() to search for the inode specified by @ino in
 * the inode cache and if present it is returned with an increased reference
 * count. This is for file systems where the inode number is sufficient for
 * unique identification of an inode.
 *
 * If the inode is not in cache, get_new_inode_fast() is called to allocate a
 * new inode and this is returned locked, hashed, and with the I_NEW flag set.
 * The file system gets to fill it in before unlocking it via
 * unlock_new_inode().
 */
struct inode *iget_locked(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *head = inode_hashtable + hash(sb, ino);
	struct inode *inode;

	inode = ifind_fast(sb, head, ino);
	if (inode)
		return inode;
	/*
	 * get_new_inode_fast() will do the right thing, re-trying the search
	 * in case it had to block at any point.
	 */
	return get_new_inode_fast(sb, head, ino);
}

EXPORT_SYMBOL(iget_locked);
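
/*
 * Usage sketch (not part of this file): a filesystem typically builds
 * its inode-lookup helper on iget_locked().  "myfs_read_inode" below is
 * a hypothetical callback that fills the inode from disk:
 *
 *	inode = iget_locked(sb, ino);
 *	if (!inode)
 *		return ERR_PTR(-ENOMEM);
 *	if (inode->i_state & I_NEW) {
 *		myfs_read_inode(inode);
 *		unlock_new_inode(inode);
 *	}
 *	return inode;
 */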

/**
 *	__insert_inode_hash - hash an inode
 *	@inode: unhashed inode
 *	@hashval: unsigned long value used to locate this object in the
 *		inode_hashtable.
 *
 *	Add an inode to the inode hash for this superblock.
 */
void __insert_inode_hash(struct inode *inode, unsigned long hashval)
{
	struct hlist_head *head = inode_hashtable + hash(inode->i_sb, hashval);
	spin_lock(&inode_lock);
	hlist_add_head(&inode->i_hash, head);
	spin_unlock(&inode_lock);
}

EXPORT_SYMBOL(__insert_inode_hash);

/**
 *	remove_inode_hash - remove an inode from the hash
 *	@inode: inode to unhash
 *
 *	Remove an inode from the superblock.
 */
void remove_inode_hash(struct inode *inode)
{
	spin_lock(&inode_lock);
	hlist_del_init(&inode->i_hash);
	spin_unlock(&inode_lock);
}

EXPORT_SYMBOL(remove_inode_hash);

/*
 * Tell the filesystem that this inode is no longer of any interest and should
 * be completely destroyed.
 *
 * We leave the inode in the inode hash table until *after* the filesystem's
 * ->delete_inode completes.  This ensures that an iget (such as nfsd might
 * instigate) will always find up-to-date information either in the hash or on
 * disk.
 *
 * I_FREEING is set so that no-one will take a new reference to the inode while
 * it is being deleted.
 *
 * Called with inode_lock held; drops it.
 */
void generic_delete_inode(struct inode *inode)
{
	const struct super_operations *op = inode->i_sb->s_op;

	list_del_init(&inode->i_list);
	list_del_init(&inode->i_sb_list);
	inode->i_state |= I_FREEING;
	inodes_stat.nr_inodes--;
	spin_unlock(&inode_lock);

	security_inode_delete(inode);

	if (op->delete_inode) {
		void (*delete)(struct inode *) = op->delete_inode;
		if (!is_bad_inode(inode))
			DQUOT_INIT(inode);
		/* Filesystems implementing their own
		 * s_op->delete_inode are required to call
		 * truncate_inode_pages and clear_inode()
		 * internally */
		delete(inode);
	} else {
		truncate_inode_pages(&inode->i_data, 0);
		clear_inode(inode);
	}
	spin_lock(&inode_lock);
	hlist_del_init(&inode->i_hash);
	spin_unlock(&inode_lock);
	wake_up_inode(inode);
	BUG_ON(inode->i_state != I_CLEAR);
	destroy_inode(inode);
}

EXPORT_SYMBOL(generic_delete_inode);

static void generic_forget_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	if (!hlist_unhashed(&inode->i_hash)) {
		if (!(inode->i_state & (I_DIRTY|I_SYNC)))
			list_move(&inode->i_list, &inode_unused);
		inodes_stat.nr_unused++;
		if (sb->s_flags & MS_ACTIVE) {
			spin_unlock(&inode_lock);
			return;
		}
		inode->i_state |= I_WILL_FREE;
		spin_unlock(&inode_lock);
		write_inode_now(inode, 1);
		spin_lock(&inode_lock);
		inode->i_state &= ~I_WILL_FREE;
		inodes_stat.nr_unused--;
		hlist_del_init(&inode->i_hash);
	}
	list_del_init(&inode->i_list);
	list_del_init(&inode->i_sb_list);
	inode->i_state |= I_FREEING;
	inodes_stat.nr_inodes--;
	spin_unlock(&inode_lock);
	if (inode->i_data.nrpages)
		truncate_inode_pages(&inode->i_data, 0);
	clear_inode(inode);
	wake_up_inode(inode);
	destroy_inode(inode);
}

/*
 * Normal UNIX filesystem behaviour: delete the
 * inode when the usage count drops to zero, and
 * i_nlink is zero.
 */
void generic_drop_inode(struct inode *inode)
{
	if (!inode->i_nlink)
		generic_delete_inode(inode);
	else
		generic_forget_inode(inode);
}

EXPORT_SYMBOL_GPL(generic_drop_inode);

/*
 * Called when we're dropping the last reference
 * to an inode.
 *
 * Call the FS "drop()" function, defaulting to
 * the legacy UNIX filesystem behaviour..
 *
 * NOTE! NOTE! NOTE! We're called with the inode lock
 * held, and the drop function is supposed to release
 * the lock!
 */
static inline void iput_final(struct inode *inode)
{
	const struct super_operations *op = inode->i_sb->s_op;
	void (*drop)(struct inode *) = generic_drop_inode;

	if (op && op->drop_inode)
		drop = op->drop_inode;
	drop(inode);
}

/**
 *	iput	- put an inode
 *	@inode: inode to put
 *
 *	Puts an inode, dropping its usage count. If the inode use count hits
 *	zero, the inode is then freed and may also be destroyed.
 *
 *	Consequently, iput() can sleep.
 */
void iput(struct inode *inode)
{
	if (inode) {
		BUG_ON(inode->i_state == I_CLEAR);

		if (atomic_dec_and_lock(&inode->i_count, &inode_lock))
			iput_final(inode);
	}
}

EXPORT_SYMBOL(iput);

/**
 *	bmap	- find a block number in a file
 *	@inode: inode of file
 *	@block: block to find
 *
 *	Returns the block number on the device holding the inode that
 *	is the disk block number for the block of the file requested.
 *	That is, asked for block 4 of inode 1 the function will return the
 *	disk block relative to the disk start that holds that block of the
 *	file.
 */
sector_t bmap(struct inode *inode, sector_t block)
{
	sector_t res = 0;
	if (inode->i_mapping->a_ops->bmap)
		res = inode->i_mapping->a_ops->bmap(inode->i_mapping, block);
	return res;
}
EXPORT_SYMBOL(bmap);

/**
 *	touch_atime	-	update the access time
 *	@mnt: mount the inode is accessed on
 *	@dentry: dentry accessed
 *
 *	Update the accessed time on an inode and mark it for writeback.
 *	This function automatically handles read only file systems and media,
 *	as well as the "noatime" flag and inode specific "noatime" markers.
 */
void touch_atime(struct vfsmount *mnt, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;
	struct timespec now;

	if (mnt_want_write(mnt))
		return;
	if (inode->i_flags & S_NOATIME)
		goto out;
	if (IS_NOATIME(inode))
		goto out;
	if ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))
		goto out;

	if (mnt->mnt_flags & MNT_NOATIME)
		goto out;
	if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
		goto out;
	if (mnt->mnt_flags & MNT_RELATIME) {
		/*
		 * With relative atime, only update atime if the previous
		 * atime is earlier than either the ctime or mtime.
		 */
		if (timespec_compare(&inode->i_mtime, &inode->i_atime) < 0 &&
		    timespec_compare(&inode->i_ctime, &inode->i_atime) < 0)
			goto out;
	}

	now = current_fs_time(inode->i_sb);
	if (timespec_equal(&inode->i_atime, &now))
		goto out;

	inode->i_atime = now;
	mark_inode_dirty_sync(inode);
out:
	mnt_drop_write(mnt);
}
EXPORT_SYMBOL(touch_atime);

/**
 *	file_update_time	-	update mtime and ctime time
 *	@file: file accessed
 *
 *	Update the mtime and ctime members of an inode and mark the inode
 *	for writeback.  Note that this function is meant exclusively for
 *	usage in the file write path of filesystems, and filesystems may
 *	choose to explicitly ignore update via this function with the
 *	S_NOCMTIME inode flag, e.g. for network filesystems where these
 *	timestamps are handled by the server.
 */
void file_update_time(struct file *file)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct timespec now;
	int sync_it = 0;
	int err;

	if (IS_NOCMTIME(inode))
		return;

	err = mnt_want_write(file->f_path.mnt);
	if (err)
		return;

	now = current_fs_time(inode->i_sb);
	if (!timespec_equal(&inode->i_mtime, &now)) {
		inode->i_mtime = now;
		sync_it = 1;
	}

	if (!timespec_equal(&inode->i_ctime, &now)) {
		inode->i_ctime = now;
		sync_it = 1;
	}

	if (IS_I_VERSION(inode)) {
		inode_inc_iversion(inode);
		sync_it = 1;
	}

	if (sync_it)
		mark_inode_dirty_sync(inode);
	mnt_drop_write(file->f_path.mnt);
}

EXPORT_SYMBOL(file_update_time);

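/*
 * inode_needs_sync - does this inode require synchronous writes?
 * True for inodes marked S_SYNC, and for directories on filesystems
 * mounted with DIRSYNC semantics.
 */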
int inode_needs_sync(struct inode *inode)
{
	if (IS_SYNC(inode))
		return 1;
	if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
		return 1;
	return 0;
}

EXPORT_SYMBOL(inode_needs_sync);

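/*
 * Bit-wait action passed to wait_on_bit() when sleeping on __I_LOCK:
 * simply reschedule until the bit owner clears the bit and wakes us.
 */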
int inode_wait(void *word)
{
	schedule();
	return 0;
}

/*
 * If we try to find an inode in the inode hash while it is being
 * deleted, we have to wait until the filesystem completes its
 * deletion before reporting that it isn't found.  This function waits
 * until the deletion _might_ have completed.  Callers are responsible
 * to recheck inode state.
 *
 * It doesn't matter if I_LOCK is not set initially, a call to
 * wake_up_inode() after removing from the hash list will DTRT.
 *
 * This is called with inode_lock held.
 */
static void __wait_on_freeing_inode(struct inode *inode)
{
	wait_queue_head_t *wq;
	DEFINE_WAIT_BIT(wait, &inode->i_state, __I_LOCK);
	wq = bit_waitqueue(&inode->i_state, __I_LOCK);
	prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
	spin_unlock(&inode_lock);
	schedule();
	finish_wait(wq, &wait.wait);
	spin_lock(&inode_lock);
}

/*
 * Lock two inodes, if both are non-NULL and distinct, in address order,
 * to avoid an ABBA deadlock between callers that lock the same pair in
 * opposite order.
 */
void inode_double_lock(struct inode *inode1, struct inode *inode2)
{
	if (inode1 == NULL || inode2 == NULL || inode1 == inode2) {
		if (inode1)
			mutex_lock(&inode1->i_mutex);
		else if (inode2)
			mutex_lock(&inode2->i_mutex);
		return;
	}

	if (inode1 < inode2) {
		mutex_lock_nested(&inode1->i_mutex, I_MUTEX_PARENT);
		mutex_lock_nested(&inode2->i_mutex, I_MUTEX_CHILD);
	} else {
		mutex_lock_nested(&inode2->i_mutex, I_MUTEX_PARENT);
		mutex_lock_nested(&inode1->i_mutex, I_MUTEX_CHILD);
	}
}
EXPORT_SYMBOL(inode_double_lock);

void inode_double_unlock(struct inode *inode1, struct inode *inode2)
{
	if (inode1)
		mutex_unlock(&inode1->i_mutex);

	if (inode2 && inode2 != inode1)
		mutex_unlock(&inode2->i_mutex);
}
EXPORT_SYMBOL(inode_double_unlock);

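/*
 * "ihash_entries=" boot parameter: override the default inode hash
 * table size computed by alloc_large_system_hash().
 */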
static __initdata unsigned long ihash_entries;
static int __init set_ihash_entries(char *str)
{
	if (!str)
		return 0;
	ihash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("ihash_entries=", set_ihash_entries);

/*
 * Initialize the inode hash table.
 */
void __init inode_init_early(void)
{
	int loop;

	/* If hashes are distributed across NUMA nodes, defer
	 * hash allocation until vmalloc space is available.
	 */
	if (hashdist)
		return;

	inode_hashtable =
		alloc_large_system_hash("Inode-cache",
					sizeof(struct hlist_head),
					ihash_entries,
					14,
					HASH_EARLY,
					&i_hash_shift,
					&i_hash_mask,
					0);

	for (loop = 0; loop < (1 << i_hash_shift); loop++)
		INIT_HLIST_HEAD(&inode_hashtable[loop]);
}

void __init inode_init(void)
{
	int loop;

	/* inode slab cache */
	inode_cachep = kmem_cache_create("inode_cache",
					 sizeof(struct inode),
					 0,
					 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
					 SLAB_MEM_SPREAD),
					 init_once);
	register_shrinker(&icache_shrinker);

	/* Hash may have been set up in inode_init_early */
	if (!hashdist)
		return;

	inode_hashtable =
		alloc_large_system_hash("Inode-cache",
					sizeof(struct hlist_head),
					ihash_entries,
					14,
					0,
					&i_hash_shift,
					&i_hash_mask,
					0);

	for (loop = 0; loop < (1 << i_hash_shift); loop++)
		INIT_HLIST_HEAD(&inode_hashtable[loop]);
}

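/*
 * init_special_inode - set up an inode for a special file.
 * Points i_fop at the default operations for character, block, FIFO and
 * socket inodes, recording the device number where one applies.
 */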
void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
{
	inode->i_mode = mode;
	if (S_ISCHR(mode)) {
		inode->i_fop = &def_chr_fops;
		inode->i_rdev = rdev;
	} else if (S_ISBLK(mode)) {
		inode->i_fop = &def_blk_fops;
		inode->i_rdev = rdev;
	} else if (S_ISFIFO(mode))
		inode->i_fop = &def_fifo_fops;
	else if (S_ISSOCK(mode))
		inode->i_fop = &bad_sock_fops;
	else
		printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o)\n",
		       mode);
}
EXPORT_SYMBOL(init_special_inode);