1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25#include <linux/module.h>
26#include <linux/fs.h>
27#include <linux/time.h>
28#include <linux/ext3_jbd.h>
29#include <linux/jbd.h>
30#include <linux/highuid.h>
31#include <linux/pagemap.h>
32#include <linux/quotaops.h>
33#include <linux/string.h>
34#include <linux/buffer_head.h>
35#include <linux/writeback.h>
36#include <linux/mpage.h>
37#include <linux/uio.h>
38#include <linux/bio.h>
39#include <linux/fiemap.h>
40#include "xattr.h"
41#include "acl.h"
42
43static int ext3_writepage_trans_blocks(struct inode *inode);
44
45
46
47
48static int ext3_inode_is_fast_symlink(struct inode *inode)
49{
50 int ea_blocks = EXT3_I(inode)->i_file_acl ?
51 (inode->i_sb->s_blocksize >> 9) : 0;
52
53 return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
54}
55
56
57
58
59
60
61
62
63
64
/*
 * ext3_forget() - drop a freed block from the journal's view
 * @handle:      journal handle for the current transaction
 * @is_metadata: non-zero if @bh holds filesystem metadata
 * @inode:       owning inode, used to determine the journalling mode
 * @bh:          buffer being freed (may be NULL when only a revoke
 *               record is needed)
 * @blocknr:     on-disk block number, required for the revoke record
 *
 * Called when a block is released: either tell the journal to forget
 * the buffer, or write a revoke record so that stale copies of the
 * block still sitting in the log can never be replayed over the block
 * once it is reused.  Returns 0 or a journal error code.
 */
int ext3_forget(handle_t *handle, int is_metadata, struct inode *inode,
		struct buffer_head *bh, ext3_fsblk_t blocknr)
{
	int err;

	might_sleep();

	BUFFER_TRACE(bh, "enter");

	jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
		  "data mode %lx\n",
		  bh, is_metadata, inode->i_mode,
		  test_opt(inode->i_sb, DATA_FLAGS));

	/*
	 * In data=journal mode, and for non-journalled data buffers in
	 * the other modes, no revoke record is needed: a plain
	 * journal_forget() (when we actually have a buffer) suffices.
	 */
	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA ||
	    (!is_metadata && !ext3_should_journal_data(inode))) {
		if (bh) {
			BUFFER_TRACE(bh, "call journal_forget");
			return ext3_journal_forget(handle, bh);
		}
		return 0;
	}

	/*
	 * Otherwise the block may still live in the log: issue a revoke
	 * so a crash + replay cannot resurrect its old contents.
	 */
	BUFFER_TRACE(bh, "call ext3_journal_revoke");
	err = ext3_journal_revoke(handle, blocknr, bh);
	if (err)
		ext3_abort(inode->i_sb, __func__,
			   "error %d when attempting revoke", err);
	BUFFER_TRACE(bh, "exit");
	return err;
}
104
105
106
107
108
109static unsigned long blocks_for_truncate(struct inode *inode)
110{
111 unsigned long needed;
112
113 needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);
114
115
116
117
118
119
120
121 if (needed < 2)
122 needed = 2;
123
124
125
126 if (needed > EXT3_MAX_TRANS_DATA)
127 needed = EXT3_MAX_TRANS_DATA;
128
129 return EXT3_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
130}
131
132
133
134
135
136
137
138
139
140
141
142static handle_t *start_transaction(struct inode *inode)
143{
144 handle_t *result;
145
146 result = ext3_journal_start(inode, blocks_for_truncate(inode));
147 if (!IS_ERR(result))
148 return result;
149
150 ext3_std_error(inode->i_sb, PTR_ERR(result));
151 return result;
152}
153
154
155
156
157
158
159
160static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
161{
162 if (handle->h_buffer_credits > EXT3_RESERVE_TRANS_BLOCKS)
163 return 0;
164 if (!ext3_journal_extend(handle, blocks_for_truncate(inode)))
165 return 0;
166 return 1;
167}
168
169
170
171
172
173
174static int ext3_journal_test_restart(handle_t *handle, struct inode *inode)
175{
176 jbd_debug(2, "restarting handle %p\n", handle);
177 return ext3_journal_restart(handle, blocks_for_truncate(inode));
178}
179
180
181
182
/*
 * Called at the last iput() when i_nlink is zero: truncate the inode's
 * data, remove it from the orphan list and free the on-disk inode,
 * all under a single journal handle.
 */
void ext3_delete_inode (struct inode * inode)
{
	handle_t *handle;

	truncate_inode_pages(&inode->i_data, 0);

	if (is_bad_inode(inode))
		goto no_delete;

	handle = start_transaction(inode);
	if (IS_ERR(handle)) {
		/*
		 * We cannot journal the deletion.  Drop the inode from
		 * the in-memory orphan list so nothing retries, and
		 * fall through to the in-core cleanup only.
		 */
		ext3_orphan_del(NULL, inode);
		goto no_delete;
	}

	if (IS_SYNC(inode))
		handle->h_sync = 1;
	inode->i_size = 0;
	if (inode->i_blocks)
		ext3_truncate(inode);

	/*
	 * The truncate is committed (or will be); the inode can now be
	 * taken off the on-disk orphan list and stamped with its
	 * deletion time.
	 */
	ext3_orphan_del(handle, inode);
	EXT3_I(inode)->i_dtime	= get_seconds();

	/*
	 * If marking the inode dirty fails we cannot trust the journal
	 * to free it; just clear the in-core inode.  Otherwise release
	 * the on-disk inode as well.
	 */
	if (ext3_mark_inode_dirty(handle, inode))
		/* If that failed, just do the required in-core inode clear */
		clear_inode(inode);
	else
		ext3_free_inode(handle, inode);
	ext3_journal_stop(handle);
	return;
no_delete:
	clear_inode(inode);	/* in-core clear is mandatory on all paths */
}
236
/*
 * Indirect - one hop in an indirect-block lookup chain (see
 * ext3_get_branch()/add_chain()).
 */
typedef struct {
	__le32 *p;		/* address of the block-number slot */
	__le32 key;		/* value of *p cached at read time */
	struct buffer_head *bh;	/* buffer holding the slot; NULL when the
				 * slot lives in the inode's i_data */
} Indirect;
242
243static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
244{
245 p->key = *(p->p = v);
246 p->bh = bh;
247}
248
249static int verify_chain(Indirect *from, Indirect *to)
250{
251 while (from <= to && from->key == *from->p)
252 from++;
253 return (from > to);
254}
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
/*
 * ext3_block_to_path - parse a logical block number into a lookup path
 * @inode:    inode in question (only its superblock geometry is used)
 * @i_block:  logical block number within the file
 * @offsets:  filled with up to four array indices leading from the
 *            inode's i_data through the indirect blocks to the block
 * @boundary: if non-NULL, set to the number of further blocks that fit
 *            before the end of the final pointer array (0 = last slot)
 *
 * Returns the path depth (1..4), or 0 if @i_block is out of range
 * (a warning is logged in that case).
 */
static int ext3_block_to_path(struct inode *inode,
			long i_block, int offsets[4], int *boundary)
{
	int ptrs = EXT3_ADDR_PER_BLOCK(inode->i_sb);
	int ptrs_bits = EXT3_ADDR_PER_BLOCK_BITS(inode->i_sb);
	const long direct_blocks = EXT3_NDIR_BLOCKS,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;
	int final = 0;

	if (i_block < 0) {
		ext3_warning (inode->i_sb, "ext3_block_to_path", "block < 0");
	} else if (i_block < direct_blocks) {
		/* addressed directly from the inode */
		offsets[n++] = i_block;
		final = direct_blocks;
	} else if ( (i_block -= direct_blocks) < indirect_blocks) {
		/* single indirect */
		offsets[n++] = EXT3_IND_BLOCK;
		offsets[n++] = i_block;
		final = ptrs;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		/* double indirect */
		offsets[n++] = EXT3_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		/* triple indirect */
		offsets[n++] = EXT3_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else {
		ext3_warning(inode->i_sb, "ext3_block_to_path", "block > big");
	}
	/* i_block now holds the offset within the last-level array */
	if (boundary)
		*boundary = final - 1 - (i_block & (ptrs - 1));
	return n;
}
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
/*
 * ext3_get_branch - read the chain of indirect blocks down to the data
 * @inode:   inode being mapped
 * @depth:   path depth, as returned by ext3_block_to_path()
 * @offsets: per-level array indices from ext3_block_to_path()
 * @chain:   filled with one Indirect per traversed level
 * @err:     0, -EIO (read failure) or -EAGAIN (chain changed under us)
 *
 * Returns NULL when the whole chain exists (the data block is mapped),
 * otherwise a pointer to the last valid Indirect: the level whose
 * pointer slot is empty (*err == 0, a hole) or where traversal stopped
 * (*err != 0).  The caller owns the buffer_head references left in
 * @chain and must brelse() them.
 */
static Indirect *ext3_get_branch(struct inode *inode, int depth, int *offsets,
				 Indirect chain[4], int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;
	/* level 0 lives in the inode itself */
	add_chain (chain, NULL, EXT3_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_bread(sb, le32_to_cpu(p->key));
		if (!bh)
			goto failure;
		/* re-check: a concurrent truncate may have changed the chain */
		if (!verify_chain(chain, p))
			goto changed;
		add_chain(++p, bh, (__le32*)bh->b_data + *++offsets);
		/* zero slot: we hit a hole */
		if (!p->key)
			goto no_block;
	}
	return NULL;

changed:
	brelse(bh);
	*err = -EAGAIN;
	goto no_block;
failure:
	*err = -EIO;
no_block:
	return p;
}
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
/*
 * ext3_find_near - find a preferred location for a new allocation
 * @inode: owner
 * @ind:   last valid Indirect in the lookup chain
 *
 * Heuristic, in order of preference: a block next to the closest
 * previously allocated pointer in the same pointer array; the block
 * holding that pointer array itself; or a pid-coloured spot inside the
 * inode's block group.
 */
static ext3_fsblk_t ext3_find_near(struct inode *inode, Indirect *ind)
{
	struct ext3_inode_info *ei = EXT3_I(inode);
	__le32 *start = ind->bh ? (__le32*) ind->bh->b_data : ei->i_data;
	__le32 *p;
	ext3_fsblk_t bg_start;
	ext3_grpblk_t colour;

	/* scan backwards for the nearest already-allocated pointer */
	for (p = ind->p - 1; p >= start; p--) {
		if (*p)
			return le32_to_cpu(*p);
	}

	/* no neighbour found: aim at the indirect block itself */
	if (ind->bh)
		return ind->bh->b_blocknr;

	/*
	 * Chain is rooted in the inode: fall back to the inode's block
	 * group, offset ("coloured") by pid to spread concurrent
	 * allocators across the group.
	 */
	bg_start = ext3_group_first_block_no(inode->i_sb, ei->i_block_group);
	colour = (current->pid % 16) *
			(EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	return bg_start + colour;
}
438
439
440
441
442
443
444
445
446
447
448
449static ext3_fsblk_t ext3_find_goal(struct inode *inode, long block,
450 Indirect *partial)
451{
452 struct ext3_block_alloc_info *block_i;
453
454 block_i = EXT3_I(inode)->i_block_alloc_info;
455
456
457
458
459
460 if (block_i && (block == block_i->last_alloc_logical_block + 1)
461 && (block_i->last_alloc_physical_block != 0)) {
462 return block_i->last_alloc_physical_block + 1;
463 }
464
465 return ext3_find_near(inode, partial);
466}
467
468
469
470
471
472
473
474
475
476
477
478
479
480static int ext3_blks_to_allocate(Indirect *branch, int k, unsigned long blks,
481 int blocks_to_boundary)
482{
483 unsigned long count = 0;
484
485
486
487
488
489 if (k > 0) {
490
491 if (blks < blocks_to_boundary + 1)
492 count += blks;
493 else
494 count += blocks_to_boundary + 1;
495 return count;
496 }
497
498 count++;
499 while (count < blks && count <= blocks_to_boundary &&
500 le32_to_cpu(*(branch[0].p + count)) == 0) {
501 count++;
502 }
503 return count;
504}
505
506
507
508
509
510
511
512
513
514
515
/*
 * ext3_alloc_blocks - allocate indirect blocks plus a run of data blocks
 * @handle:        journal handle
 * @inode:         owner
 * @goal:          preferred physical location
 * @indirect_blks: number of (single) indirect blocks still missing
 * @blks:          number of data blocks the caller would like
 * @new_blocks:    out: the indirect blocks, then the first data block
 * @err:           out: 0 or an allocator error
 *
 * Returns the number of data blocks obtained (>= 1 on success); the
 * data blocks are physically contiguous starting at
 * new_blocks[indirect_blks].  On failure everything already allocated
 * is freed and 0 is returned with *err set.
 */
static int ext3_alloc_blocks(handle_t *handle, struct inode *inode,
			ext3_fsblk_t goal, int indirect_blks, int blks,
			ext3_fsblk_t new_blocks[4], int *err)
{
	int target, i;
	unsigned long count = 0;
	int index = 0;
	ext3_fsblk_t current_block = 0;
	int ret = 0;

	/*
	 * Keep calling the allocator until the required indirect blocks
	 * are covered and at least one block is left over for data.
	 */
	target = blks + indirect_blks;

	while (1) {
		count = target;
		/* allocate up to @count contiguous blocks near @goal */
		current_block = ext3_new_blocks(handle,inode,goal,&count,err);
		if (*err)
			goto failed_out;

		target -= count;
		/* first allocations become the missing indirect blocks */
		while (index < indirect_blks && count) {
			new_blocks[index++] = current_block++;
			count--;
		}

		/* anything left after the metadata is data: done */
		if (count > 0)
			break;
	}

	/* first block of the contiguous data run */
	new_blocks[index] = current_block;

	/* number of data blocks actually allocated */
	ret = count;
	*err = 0;
	return ret;
failed_out:
	/* undo: free any indirect blocks we already grabbed */
	for (i = 0; i <index; i++)
		ext3_free_blocks(handle, inode, new_blocks[i], 1);
	return ret;
}
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
/*
 * ext3_alloc_branch - allocate and link together a missing branch
 * @handle:        journal handle
 * @inode:         owner
 * @indirect_blks: number of indirect blocks to create
 * @blks:          in: data blocks wanted; out: data blocks obtained
 * @goal:          preferred physical location
 * @offsets:       per-level slot indices for the new branch
 * @branch:        filled with the new chain links; branch[0].key is set
 *                 but branch[0] is NOT yet spliced into its parent -
 *                 that is done later by ext3_splice_branch()
 *
 * Each new indirect block is zeroed, populated with the pointer(s) to
 * the next level (and, at the bottom, the run of data blocks), then
 * journalled as metadata.  Returns 0 on success; on error all blocks
 * and journal state are rolled back.
 */
static int ext3_alloc_branch(handle_t *handle, struct inode *inode,
			int indirect_blks, int *blks, ext3_fsblk_t goal,
			int *offsets, Indirect *branch)
{
	int blocksize = inode->i_sb->s_blocksize;
	int i, n = 0;
	int err = 0;
	struct buffer_head *bh;
	int num;
	ext3_fsblk_t new_blocks[4];
	ext3_fsblk_t current_block;

	num = ext3_alloc_blocks(handle, inode, goal, indirect_blks,
				*blks, new_blocks, &err);
	if (err)
		return err;

	branch[0].key = cpu_to_le32(new_blocks[0]);

	/* build each new indirect block, top level first */
	for (n = 1; n <= indirect_blks; n++) {
		/*
		 * Get the buffer for the freshly allocated block and make
		 * it journal-writable before touching its contents.
		 */
		bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
		branch[n].bh = bh;
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		err = ext3_journal_get_create_access(handle, bh);
		if (err) {
			unlock_buffer(bh);
			brelse(bh);
			goto failed;
		}

		memset(bh->b_data, 0, blocksize);
		branch[n].p = (__le32 *) bh->b_data + offsets[n];
		branch[n].key = cpu_to_le32(new_blocks[n]);
		*branch[n].p = branch[n].key;
		if ( n == indirect_blks) {
			current_block = new_blocks[n];
			/*
			 * Bottom level: fill in the rest of the contiguous
			 * data-block run (first pointer was set above).
			 */
			for (i=1; i < num; i++)
				*(branch[n].p + i) = cpu_to_le32(++current_block);
		}
		BUFFER_TRACE(bh, "marking uptodate");
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
		err = ext3_journal_dirty_metadata(handle, bh);
		if (err)
			goto failed;
	}
	*blks = num;
	return err;
failed:
	/* roll back: forget the journalled buffers, free all blocks */
	for (i = 1; i <= n ; i++) {
		BUFFER_TRACE(branch[i].bh, "call journal_forget");
		ext3_journal_forget(handle, branch[i].bh);
	}
	for (i = 0; i <indirect_blks; i++)
		ext3_free_blocks(handle, inode, new_blocks[i], 1);
	/* i == indirect_blks here: free the data-block run as well */
	ext3_free_blocks(handle, inode, new_blocks[i], num);

	return err;
}
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
/*
 * ext3_splice_branch - connect a newly allocated branch to the inode
 * @handle: journal handle
 * @inode:  owner
 * @block:  logical block number of the first new data block
 * @where:  chain link to splice at (filled by ext3_alloc_branch())
 * @num:    number of new indirect blocks below the splice point
 * @blks:   number of new data blocks
 *
 * Writes the new branch's root pointer into its parent (inode i_data
 * or an existing indirect block), fills any remaining direct pointers,
 * updates the allocation-reservation hints, and journals/dirties the
 * modified parent.  On error all the new blocks are rolled back.
 */
static int ext3_splice_branch(handle_t *handle, struct inode *inode,
			long block, Indirect *where, int num, int blks)
{
	int i;
	int err = 0;
	struct ext3_block_alloc_info *block_i;
	ext3_fsblk_t current_block;

	block_i = EXT3_I(inode)->i_block_alloc_info;

	/*
	 * If the splice point is inside an indirect block (not the inode
	 * itself), get journal write access before modifying it.
	 */
	if (where->bh) {
		BUFFER_TRACE(where->bh, "get_write_access");
		err = ext3_journal_get_write_access(handle, where->bh);
		if (err)
			goto err_out;
	}

	/* the actual splice: publish the new branch's root pointer */
	*where->p = where->key;

	/*
	 * No new indirect blocks: the extra direct pointers go straight
	 * into the existing pointer array after the splice slot.
	 */
	if (num == 0 && blks > 1) {
		current_block = le32_to_cpu(where->key) + 1;
		for (i = 1; i < blks; i++)
			*(where->p + i ) = cpu_to_le32(current_block++);
	}

	/*
	 * Remember where this allocation ended so the next sequential
	 * write can continue right after it (see ext3_find_goal()).
	 */
	if (block_i) {
		block_i->last_alloc_logical_block = block + blks - 1;
		block_i->last_alloc_physical_block =
				le32_to_cpu(where[num].key) + blks - 1;
	}

	/* the inode changed (block pointers and/or ctime) */
	inode->i_ctime = CURRENT_TIME_SEC;
	ext3_mark_inode_dirty(handle, inode);

	/* journal the parent we modified, if it was an indirect block */
	if (where->bh) {
		/*
		 * The parent is metadata: it must go through the journal
		 * via dirty_metadata rather than the page cache.
		 */
		jbd_debug(5, "splicing indirect only\n");
		BUFFER_TRACE(where->bh, "call ext3_journal_dirty_metadata");
		err = ext3_journal_dirty_metadata(handle, where->bh);
		if (err)
			goto err_out;
	} else {
		/*
		 * The parent is the inode itself, already marked dirty
		 * above - nothing further to journal here.
		 */
		jbd_debug(5, "splicing direct\n");
	}
	return err;

err_out:
	/* roll back: forget/free the new indirect blocks and data run */
	for (i = 1; i <= num; i++) {
		BUFFER_TRACE(where[i].bh, "call journal_forget");
		ext3_journal_forget(handle, where[i].bh);
		ext3_free_blocks(handle,inode,le32_to_cpu(where[i-1].key),1);
	}
	ext3_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks);

	return err;
}
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
788 sector_t iblock, unsigned long maxblocks,
789 struct buffer_head *bh_result,
790 int create, int extend_disksize)
791{
792 int err = -EIO;
793 int offsets[4];
794 Indirect chain[4];
795 Indirect *partial;
796 ext3_fsblk_t goal;
797 int indirect_blks;
798 int blocks_to_boundary = 0;
799 int depth;
800 struct ext3_inode_info *ei = EXT3_I(inode);
801 int count = 0;
802 ext3_fsblk_t first_block = 0;
803
804
805 J_ASSERT(handle != NULL || create == 0);
806 depth = ext3_block_to_path(inode,iblock,offsets,&blocks_to_boundary);
807
808 if (depth == 0)
809 goto out;
810
811 partial = ext3_get_branch(inode, depth, offsets, chain, &err);
812
813
814 if (!partial) {
815 first_block = le32_to_cpu(chain[depth - 1].key);
816 clear_buffer_new(bh_result);
817 count++;
818
819 while (count < maxblocks && count <= blocks_to_boundary) {
820 ext3_fsblk_t blk;
821
822 if (!verify_chain(chain, partial)) {
823
824
825
826
827
828
829
830 err = -EAGAIN;
831 count = 0;
832 break;
833 }
834 blk = le32_to_cpu(*(chain[depth-1].p + count));
835
836 if (blk == first_block + count)
837 count++;
838 else
839 break;
840 }
841 if (err != -EAGAIN)
842 goto got_it;
843 }
844
845
846 if (!create || err == -EIO)
847 goto cleanup;
848
849 mutex_lock(&ei->truncate_mutex);
850
851
852
853
854
855
856
857
858
859
860
861
862
863 if (err == -EAGAIN || !verify_chain(chain, partial)) {
864 while (partial > chain) {
865 brelse(partial->bh);
866 partial--;
867 }
868 partial = ext3_get_branch(inode, depth, offsets, chain, &err);
869 if (!partial) {
870 count++;
871 mutex_unlock(&ei->truncate_mutex);
872 if (err)
873 goto cleanup;
874 clear_buffer_new(bh_result);
875 goto got_it;
876 }
877 }
878
879
880
881
882
883 if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
884 ext3_init_block_alloc_info(inode);
885
886 goal = ext3_find_goal(inode, iblock, partial);
887
888
889 indirect_blks = (chain + depth) - partial - 1;
890
891
892
893
894
895 count = ext3_blks_to_allocate(partial, indirect_blks,
896 maxblocks, blocks_to_boundary);
897
898
899
900 err = ext3_alloc_branch(handle, inode, indirect_blks, &count, goal,
901 offsets + (partial - chain), partial);
902
903
904
905
906
907
908
909
910 if (!err)
911 err = ext3_splice_branch(handle, inode, iblock,
912 partial, indirect_blks, count);
913
914
915
916
917
918 if (!err && extend_disksize && inode->i_size > ei->i_disksize)
919 ei->i_disksize = inode->i_size;
920 mutex_unlock(&ei->truncate_mutex);
921 if (err)
922 goto cleanup;
923
924 set_buffer_new(bh_result);
925got_it:
926 map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
927 if (count > blocks_to_boundary)
928 set_buffer_boundary(bh_result);
929 err = count;
930
931 partial = chain + depth - 1;
932cleanup:
933 while (partial > chain) {
934 BUFFER_TRACE(partial->bh, "call brelse");
935 brelse(partial->bh);
936 partial--;
937 }
938 BUFFER_TRACE(bh_result, "returned");
939out:
940 return err;
941}
942
943
/* Largest number of blocks mapped by one direct-I/O get_block call. */
#define DIO_MAX_BLOCKS 4096

/*
 * Journal credits reserved per direct-I/O transaction; the caller adds
 * quota-transaction blocks on top (see ext3_get_block()).
 */
#define DIO_CREDITS 25
953
/*
 * ext3_get_block - get_block_t callback used by the generic page and
 * direct-I/O paths.  Starts its own journal handle when allocation is
 * requested and no handle is already running in this task.
 * Returns 0 on success (bh_result describes the mapping) or a
 * negative error.
 */
static int ext3_get_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	handle_t *handle = ext3_journal_current_handle();
	int ret = 0, started = 0;
	unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;

	if (create && !handle) {
		/* Direct IO write: start a transaction of our own */
		if (max_blocks > DIO_MAX_BLOCKS)
			max_blocks = DIO_MAX_BLOCKS;
		handle = ext3_journal_start(inode, DIO_CREDITS +
				2 * EXT3_QUOTA_TRANS_BLOCKS(inode->i_sb));
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			goto out;
		}
		started = 1;
	}

	ret = ext3_get_blocks_handle(handle, inode, iblock,
					max_blocks, bh_result, create, 0);
	if (ret > 0) {
		/* report the mapped extent size back through b_size */
		bh_result->b_size = (ret << inode->i_blkbits);
		ret = 0;
	}
	/* only stop a handle this function itself started */
	if (started)
		ext3_journal_stop(handle);
out:
	return ret;
}
984
/*
 * ext3_fiemap - FIEMAP ioctl support, delegated to the generic
 * block-mapping implementation driven by ext3_get_block().
 */
int ext3_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	return generic_block_fiemap(inode, fieinfo, start, len,
				    ext3_get_block);
}
991
992
993
994
/*
 * ext3_getblk - get (and optionally allocate) a single metadata block
 * @handle: journal handle (may be NULL when @create == 0)
 * @inode:  inode the block belongs to
 * @block:  logical block number
 * @create: allocate the block if it does not exist
 * @errp:   out: 0 or a negative error
 *
 * Returns a referenced buffer_head, or NULL with *errp set on error
 * (or NULL with *errp == 0 for an unallocated hole).  A newly
 * allocated block is zeroed and journalled as metadata.
 */
struct buffer_head *ext3_getblk(handle_t *handle, struct inode *inode,
				long block, int create, int *errp)
{
	struct buffer_head dummy;
	int fatal = 0, err;

	J_ASSERT(handle != NULL || create == 0);

	/* use a throwaway bh just to receive the mapping */
	dummy.b_state = 0;
	dummy.b_blocknr = -1000;
	buffer_trace_init(&dummy.b_history);
	err = ext3_get_blocks_handle(handle, inode, block, 1,
					&dummy, create, 1);
	/*
	 * ext3_get_blocks_handle() returns the number of blocks mapped;
	 * we asked for exactly one, so anything more is unexpected.
	 */
	if (err > 0) {
		if (err > 1)
			WARN_ON(1);
		err = 0;
	}
	*errp = err;
	if (!err && buffer_mapped(&dummy)) {
		struct buffer_head *bh;
		bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
		if (!bh) {
			*errp = -EIO;
			goto err;
		}
		if (buffer_new(&dummy)) {
			J_ASSERT(create != 0);
			J_ASSERT(handle != NULL);

			/*
			 * Freshly allocated: zero it under the buffer
			 * lock after gaining journal create access, then
			 * file it as journalled metadata.
			 */
			lock_buffer(bh);
			BUFFER_TRACE(bh, "call get_create_access");
			fatal = ext3_journal_get_create_access(handle, bh);
			if (!fatal && !buffer_uptodate(bh)) {
				memset(bh->b_data,0,inode->i_sb->s_blocksize);
				set_buffer_uptodate(bh);
			}
			unlock_buffer(bh);
			BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
			err = ext3_journal_dirty_metadata(handle, bh);
			if (!fatal)
				fatal = err;
		} else {
			BUFFER_TRACE(bh, "not a new buffer");
		}
		if (fatal) {
			*errp = fatal;
			brelse(bh);
			bh = NULL;
		}
		return bh;
	}
err:
	return NULL;
}
1061
1062struct buffer_head *ext3_bread(handle_t *handle, struct inode *inode,
1063 int block, int create, int *err)
1064{
1065 struct buffer_head * bh;
1066
1067 bh = ext3_getblk(handle, inode, block, create, err);
1068 if (!bh)
1069 return bh;
1070 if (buffer_uptodate(bh))
1071 return bh;
1072 ll_rw_block(READ_META, 1, &bh);
1073 wait_on_buffer(bh);
1074 if (buffer_uptodate(bh))
1075 return bh;
1076 put_bh(bh);
1077 *err = -EIO;
1078 return NULL;
1079}
1080
/*
 * walk_page_buffers - apply @fn to each buffer of a page within a range
 * @handle:  journal handle passed through to @fn
 * @head:    first buffer of the page's ring
 * @from:    first byte offset of interest within the page
 * @to:      end byte offset (exclusive)
 * @partial: if non-NULL, set to 1 when a buffer OUTSIDE [from, to) is
 *           not uptodate (i.e. the page cannot be marked uptodate)
 * @fn:      callback invoked for every buffer overlapping [from, to)
 *
 * Returns the first non-zero value from @fn, continuing the walk so
 * every overlapping buffer is still visited.
 */
static int walk_page_buffers(	handle_t *handle,
				struct buffer_head *head,
				unsigned from,
				unsigned to,
				int *partial,
				int (*fn)(	handle_t *handle,
						struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	/* buffers form a circular list: stop when we are back at head */
	for (	bh = head, block_start = 0;
		ret == 0 && (bh != head || !block_start);
		block_start = block_end, bh = next)
	{
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			/* outside the range: only affects page uptodateness */
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138static int do_journal_get_write_access(handle_t *handle,
1139 struct buffer_head *bh)
1140{
1141 if (!buffer_mapped(bh) || buffer_freed(bh))
1142 return 0;
1143 return ext3_journal_get_write_access(handle, bh);
1144}
1145
/*
 * ext3_write_begin - prepare a page for a buffered write
 *
 * Grabs and locks the page, starts a journal handle sized for a full
 * page of allocation, runs the generic block_write_begin(), and in
 * data=journal mode additionally gets journal write access to each
 * affected buffer.  On ENOSPC the whole sequence is retried after the
 * journal commits some transactions.
 */
static int ext3_write_begin(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned flags,
				struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int ret, needed_blocks = ext3_writepage_trans_blocks(inode);
	handle_t *handle;
	int retries = 0;
	struct page *page;
	pgoff_t index;
	unsigned from, to;

	index = pos >> PAGE_CACHE_SHIFT;
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

retry:
	/* the page must be held before the handle: lock ordering */
	page = __grab_cache_page(mapping, index);
	if (!page)
		return -ENOMEM;
	*pagep = page;

	handle = ext3_journal_start(inode, needed_blocks);
	if (IS_ERR(handle)) {
		unlock_page(page);
		page_cache_release(page);
		ret = PTR_ERR(handle);
		goto out;
	}
	ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
							ext3_get_block);
	if (ret)
		goto write_begin_failed;

	if (ext3_should_journal_data(inode)) {
		ret = walk_page_buffers(handle, page_buffers(page),
				from, to, NULL, do_journal_get_write_access);
	}
write_begin_failed:
	if (ret) {
		ext3_journal_stop(handle);
		unlock_page(page);
		page_cache_release(page);
		/*
		 * block_write_begin may have instantiated blocks past
		 * i_size; truncate them away again so a failed write
		 * does not leave allocated-but-unwritten blocks behind.
		 */
		if (pos + len > inode->i_size)
			vmtruncate(inode, inode->i_size);
	}
	if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
		goto retry;
out:
	return ret;
}
1202
1203
1204int ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
1205{
1206 int err = journal_dirty_data(handle, bh);
1207 if (err)
1208 ext3_journal_abort_handle(__func__, __func__,
1209 bh, handle, err);
1210 return err;
1211}
1212
1213
1214static int write_end_fn(handle_t *handle, struct buffer_head *bh)
1215{
1216 if (!buffer_mapped(bh) || buffer_freed(bh))
1217 return 0;
1218 set_buffer_uptodate(bh);
1219 return ext3_journal_dirty_metadata(handle, bh);
1220}
1221
1222
1223
1224
1225
1226
1227
1228static int ext3_generic_write_end(struct file *file,
1229 struct address_space *mapping,
1230 loff_t pos, unsigned len, unsigned copied,
1231 struct page *page, void *fsdata)
1232{
1233 struct inode *inode = file->f_mapping->host;
1234
1235 copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
1236
1237 if (pos+copied > inode->i_size) {
1238 i_size_write(inode, pos+copied);
1239 mark_inode_dirty(inode);
1240 }
1241
1242 return copied;
1243}
1244
1245
1246
1247
1248
1249
1250
1251
/*
 * write_end for data=ordered mode: file the data buffers on the
 * transaction's ordered list (so they hit disk before the commit),
 * push i_disksize forward, finish the generic write_end and stop the
 * handle started by ext3_write_begin().
 */
static int ext3_ordered_write_end(struct file *file,
				struct address_space *mapping,
				loff_t pos, unsigned len, unsigned copied,
				struct page *page, void *fsdata)
{
	handle_t *handle = ext3_journal_current_handle();
	struct inode *inode = file->f_mapping->host;
	unsigned from, to;
	int ret = 0, ret2;

	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

	ret = walk_page_buffers(handle, page_buffers(page),
		from, to, NULL, ext3_journal_dirty_data);

	if (ret == 0) {
		/*
		 * i_disksize must be updated inside the transaction so
		 * a crash after commit cannot expose stale data past
		 * the recorded size.
		 */
		loff_t new_i_size;

		new_i_size = pos + copied;
		if (new_i_size > EXT3_I(inode)->i_disksize)
			EXT3_I(inode)->i_disksize = new_i_size;
		ret2 = ext3_generic_write_end(file, mapping, pos, len, copied,
							page, fsdata);
		copied = ret2;
		if (ret2 < 0)
			ret = ret2;
	}
	ret2 = ext3_journal_stop(handle);
	if (!ret)
		ret = ret2;
	unlock_page(page);
	page_cache_release(page);

	return ret ? ret : copied;
}
1293
1294static int ext3_writeback_write_end(struct file *file,
1295 struct address_space *mapping,
1296 loff_t pos, unsigned len, unsigned copied,
1297 struct page *page, void *fsdata)
1298{
1299 handle_t *handle = ext3_journal_current_handle();
1300 struct inode *inode = file->f_mapping->host;
1301 int ret = 0, ret2;
1302 loff_t new_i_size;
1303
1304 new_i_size = pos + copied;
1305 if (new_i_size > EXT3_I(inode)->i_disksize)
1306 EXT3_I(inode)->i_disksize = new_i_size;
1307
1308 ret2 = ext3_generic_write_end(file, mapping, pos, len, copied,
1309 page, fsdata);
1310 copied = ret2;
1311 if (ret2 < 0)
1312 ret = ret2;
1313
1314 ret2 = ext3_journal_stop(handle);
1315 if (!ret)
1316 ret = ret2;
1317 unlock_page(page);
1318 page_cache_release(page);
1319
1320 return ret ? ret : copied;
1321}
1322
/*
 * write_end for data=journal mode: journal every touched buffer as
 * metadata, zero any buffers the copy did not reach, update
 * i_size/i_disksize and flag the inode so ext3_bmap() knows journalled
 * data may still be un-written-back.
 */
static int ext3_journalled_write_end(struct file *file,
				struct address_space *mapping,
				loff_t pos, unsigned len, unsigned copied,
				struct page *page, void *fsdata)
{
	handle_t *handle = ext3_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;
	int partial = 0;
	unsigned from, to;

	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

	if (copied < len) {
		/*
		 * Short copy: the tail of the range was never written;
		 * zero the new buffers so no stale data leaks.
		 */
		if (!PageUptodate(page))
			copied = 0;
		page_zero_new_buffers(page, from+copied, to);
	}

	ret = walk_page_buffers(handle, page_buffers(page), from,
				to, &partial, write_end_fn);
	/* only mark the page uptodate if every buffer on it is */
	if (!partial)
		SetPageUptodate(page);
	if (pos+copied > inode->i_size)
		i_size_write(inode, pos+copied);
	/* tell ext3_bmap() that journalled data may not be on disk yet */
	EXT3_I(inode)->i_state |= EXT3_STATE_JDATA;
	if (inode->i_size > EXT3_I(inode)->i_disksize) {
		EXT3_I(inode)->i_disksize = inode->i_size;
		ret2 = ext3_mark_inode_dirty(handle, inode);
		if (!ret)
			ret = ret2;
	}

	ret2 = ext3_journal_stop(handle);
	if (!ret)
		ret = ret2;
	unlock_page(page);
	page_cache_release(page);

	return ret ? ret : copied;
}
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
/*
 * ext3_bmap - map a logical file block to its physical block number
 *
 * In data=journal mode, dirty data may still live only in the journal;
 * the physical block a user (e.g. a swap or LILO-style tool) would read
 * directly could be stale.  So if journalled data is pending
 * (EXT3_STATE_JDATA), flush the whole journal first, then fall through
 * to the generic mapping.  Returns 0 on flush failure or for a hole.
 */
static sector_t ext3_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;
	journal_t *journal;
	int err;

	if (EXT3_I(inode)->i_state & EXT3_STATE_JDATA) {
		/*
		 * Clear the flag first: any data journalled after this
		 * point will set it again and trigger another flush on
		 * the next bmap call.
		 */
		EXT3_I(inode)->i_state &= ~EXT3_STATE_JDATA;
		journal = EXT3_JOURNAL(inode);
		journal_lock_updates(journal);
		err = journal_flush(journal);
		journal_unlock_updates(journal);

		if (err)
			return 0;
	}

	return generic_block_bmap(mapping,block,ext3_get_block);
}
1417
1418static int bget_one(handle_t *handle, struct buffer_head *bh)
1419{
1420 get_bh(bh);
1421 return 0;
1422}
1423
1424static int bput_one(handle_t *handle, struct buffer_head *bh)
1425{
1426 put_bh(bh);
1427 return 0;
1428}
1429
1430static int journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh)
1431{
1432 if (buffer_mapped(bh))
1433 return ext3_journal_dirty_data(handle, bh);
1434 return 0;
1435}
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
/*
 * writepage for data=ordered mode: write the page with the generic
 * helper, then file its buffers on the transaction's ordered list so
 * the data reaches disk before the transaction commits.
 */
static int ext3_ordered_writepage(struct page *page,
				struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct buffer_head *page_bufs;
	handle_t *handle = NULL;
	int ret = 0;
	int err;

	J_ASSERT(PageLocked(page));

	/*
	 * A transaction is already running in this task (we were
	 * re-entered from an allocation path): starting a nested handle
	 * would deadlock, so just redirty the page for later.
	 */
	if (ext3_journal_current_handle())
		goto out_fail;

	handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));

	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out_fail;
	}

	if (!page_has_buffers(page)) {
		create_empty_buffers(page, inode->i_sb->s_blocksize,
				(1 << BH_Dirty)|(1 << BH_Uptodate));
	}
	page_bufs = page_buffers(page);
	/* pin the buffers: block_write_full_page may drop them */
	walk_page_buffers(handle, page_bufs, 0,
			PAGE_CACHE_SIZE, NULL, bget_one);

	ret = block_write_full_page(page, ext3_get_block, wbc);

	/*
	 * Only on success do we file the (still-pinned) buffers as
	 * ordered data; on failure the page was redirtied by the
	 * generic code and will be retried.
	 */
	if (ret == 0) {
		err = walk_page_buffers(handle, page_bufs, 0, PAGE_CACHE_SIZE,
					NULL, journal_dirty_data_fn);
		if (!ret)
			ret = err;
	}
	/* drop the pins taken above */
	walk_page_buffers(handle, page_bufs, 0,
			PAGE_CACHE_SIZE, NULL, bput_one);
	err = ext3_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;

out_fail:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return ret;
}
1554
1555static int ext3_writeback_writepage(struct page *page,
1556 struct writeback_control *wbc)
1557{
1558 struct inode *inode = page->mapping->host;
1559 handle_t *handle = NULL;
1560 int ret = 0;
1561 int err;
1562
1563 if (ext3_journal_current_handle())
1564 goto out_fail;
1565
1566 handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
1567 if (IS_ERR(handle)) {
1568 ret = PTR_ERR(handle);
1569 goto out_fail;
1570 }
1571
1572 if (test_opt(inode->i_sb, NOBH) && ext3_should_writeback_data(inode))
1573 ret = nobh_writepage(page, ext3_get_block, wbc);
1574 else
1575 ret = block_write_full_page(page, ext3_get_block, wbc);
1576
1577 err = ext3_journal_stop(handle);
1578 if (!ret)
1579 ret = err;
1580 return ret;
1581
1582out_fail:
1583 redirty_page_for_writepage(wbc, page);
1584 unlock_page(page);
1585 return ret;
1586}
1587
/*
 * writepage for data=journal mode: pages dirtied through the journal
 * (PageChecked) have their buffers journalled as metadata here; pages
 * already journalled at write time go through the plain block path.
 */
static int ext3_journalled_writepage(struct page *page,
				struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	handle_t *handle = NULL;
	int ret = 0;
	int err;

	/* re-entered from an allocation path: cannot nest handles */
	if (ext3_journal_current_handle())
		goto no_write;

	handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto no_write;
	}

	if (!page_has_buffers(page) || PageChecked(page)) {
		/*
		 * Page was dirtied via ext3_journalled_set_page_dirty():
		 * its data must be copied into the journal, so treat the
		 * whole page like a fresh journalled write.
		 */
		ClearPageChecked(page);
		ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE,
					ext3_get_block);
		if (ret != 0) {
			ext3_journal_stop(handle);
			goto out_unlock;
		}
		ret = walk_page_buffers(handle, page_buffers(page), 0,
			PAGE_CACHE_SIZE, NULL, do_journal_get_write_access);

		err = walk_page_buffers(handle, page_buffers(page), 0,
				PAGE_CACHE_SIZE, NULL, write_end_fn);
		if (ret == 0)
			ret = err;
		EXT3_I(inode)->i_state |= EXT3_STATE_JDATA;
		unlock_page(page);
	} else {
		/*
		 * Page's buffers were already journalled (or it has
		 * none that need it): ordinary block writeout suffices.
		 */
		ret = block_write_full_page(page, ext3_get_block, wbc);
	}
	err = ext3_journal_stop(handle);
	if (!ret)
		ret = err;
out:
	return ret;

no_write:
	redirty_page_for_writepage(wbc, page);
out_unlock:
	unlock_page(page);
	goto out;
}
1646
1647static int ext3_readpage(struct file *file, struct page *page)
1648{
1649 return mpage_readpage(page, ext3_get_block);
1650}
1651
1652static int
1653ext3_readpages(struct file *file, struct address_space *mapping,
1654 struct list_head *pages, unsigned nr_pages)
1655{
1656 return mpage_readpages(mapping, pages, nr_pages, ext3_get_block);
1657}
1658
1659static void ext3_invalidatepage(struct page *page, unsigned long offset)
1660{
1661 journal_t *journal = EXT3_JOURNAL(page->mapping->host);
1662
1663
1664
1665
1666 if (offset == 0)
1667 ClearPageChecked(page);
1668
1669 journal_invalidatepage(journal, page, offset);
1670}
1671
1672static int ext3_releasepage(struct page *page, gfp_t wait)
1673{
1674 journal_t *journal = EXT3_JOURNAL(page->mapping->host);
1675
1676 WARN_ON(PageChecked(page));
1677 if (!page_has_buffers(page))
1678 return 0;
1679 return journal_try_to_free_buffers(journal, page, wait);
1680}
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
/*
 * ext3_direct_IO - O_DIRECT read/write
 *
 * For a size-extending write the inode is put on the orphan list
 * around the transfer, so a crash mid-write leaves no blocks past the
 * recorded size after recovery.  The transfer itself runs through the
 * generic blockdev direct-I/O engine with ext3_get_block() starting
 * its own journal handles as needed.
 */
static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
			const struct iovec *iov, loff_t offset,
			unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct ext3_inode_info *ei = EXT3_I(inode);
	handle_t *handle;
	ssize_t ret;
	int orphan = 0;
	size_t count = iov_length(iov, nr_segs);

	if (rw == WRITE) {
		loff_t final_size = offset + count;

		if (final_size > inode->i_size) {
			/* Credits for sb + inode write */
			handle = ext3_journal_start(inode, 2);
			if (IS_ERR(handle)) {
				ret = PTR_ERR(handle);
				goto out;
			}
			ret = ext3_orphan_add(handle, inode);
			if (ret) {
				ext3_journal_stop(handle);
				goto out;
			}
			orphan = 1;
			ei->i_disksize = inode->i_size;
			ext3_journal_stop(handle);
		}
	}

	ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
				 offset, nr_segs,
				 ext3_get_block, NULL);

	if (orphan) {
		int err;

		/* Credits for sb + inode write */
		handle = ext3_journal_start(inode, 2);
		if (IS_ERR(handle)) {
			/*
			 * We cannot complete the orphan processing; the
			 * inode stays orphaned and will be cleaned up by
			 * recovery.
			 */
			ret = PTR_ERR(handle);
			goto out;
		}
		if (inode->i_nlink)
			ext3_orphan_del(handle, inode);
		if (ret > 0) {
			loff_t end = offset + ret;
			if (end > inode->i_size) {
				ei->i_disksize = end;
				i_size_write(inode, end);
				/*
				 * The inode size change must hit the
				 * journal before the handle is stopped
				 * so it is covered by this transaction.
				 */
				ext3_mark_inode_dirty(handle, inode);
			}
		}
		err = ext3_journal_stop(handle);
		if (ret == 0)
			ret = err;
	}
out:
	return ret;
}
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
/*
 * Dirty a page in data=journal mode.  The PageChecked mark tells the
 * writepage path that this page's data must go through the journal.
 */
static int ext3_journalled_set_page_dirty(struct page *page)
{
	SetPageChecked(page);
	return __set_page_dirty_nobuffers(page);
}
1783
/* Address space operations for data=ordered mounts. */
static const struct address_space_operations ext3_ordered_aops = {
	.readpage		= ext3_readpage,
	.readpages		= ext3_readpages,
	.writepage		= ext3_ordered_writepage,
	.sync_page		= block_sync_page,
	.write_begin		= ext3_write_begin,
	.write_end		= ext3_ordered_write_end,
	.bmap			= ext3_bmap,
	.invalidatepage		= ext3_invalidatepage,
	.releasepage		= ext3_releasepage,
	.direct_IO		= ext3_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate  = block_is_partially_uptodate,
};
1798
/* Address space operations for data=writeback mounts. */
static const struct address_space_operations ext3_writeback_aops = {
	.readpage		= ext3_readpage,
	.readpages		= ext3_readpages,
	.writepage		= ext3_writeback_writepage,
	.sync_page		= block_sync_page,
	.write_begin		= ext3_write_begin,
	.write_end		= ext3_writeback_write_end,
	.bmap			= ext3_bmap,
	.invalidatepage		= ext3_invalidatepage,
	.releasepage		= ext3_releasepage,
	.direct_IO		= ext3_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate  = block_is_partially_uptodate,
};
1813
/*
 * Address space operations for data=journal mounts.  Note the extra
 * .set_page_dirty hook and that, unlike the other two tables, there is
 * no .direct_IO entry — presumably O_DIRECT is not supported in
 * data=journal mode; confirm against the open/fallback path.
 */
static const struct address_space_operations ext3_journalled_aops = {
	.readpage		= ext3_readpage,
	.readpages		= ext3_readpages,
	.writepage		= ext3_journalled_writepage,
	.sync_page		= block_sync_page,
	.write_begin		= ext3_write_begin,
	.write_end		= ext3_journalled_write_end,
	.set_page_dirty		= ext3_journalled_set_page_dirty,
	.bmap			= ext3_bmap,
	.invalidatepage		= ext3_invalidatepage,
	.releasepage		= ext3_releasepage,
	.is_partially_uptodate  = block_is_partially_uptodate,
};
1827
1828void ext3_set_aops(struct inode *inode)
1829{
1830 if (ext3_should_order_data(inode))
1831 inode->i_mapping->a_ops = &ext3_ordered_aops;
1832 else if (ext3_should_writeback_data(inode))
1833 inode->i_mapping->a_ops = &ext3_writeback_aops;
1834 else
1835 inode->i_mapping->a_ops = &ext3_journalled_aops;
1836}
1837
1838
1839
1840
1841
1842
1843
/*
 * ext3_block_truncate_page() zeroes out a mapping from file offset `from'
 * up to the end of the block which corresponds to `from'.
 * This required during truncate. We need to physically zero the tail end
 * of that block so it doesn't yield old data if the file is later grown.
 *
 * Caller passes a locked page with an extra reference; both are dropped
 * on all exit paths.
 */
static int ext3_block_truncate_page(handle_t *handle, struct page *page,
		struct address_space *mapping, loff_t from)
{
	ext3_fsblk_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	unsigned blocksize, iblock, length, pos;
	struct inode *inode = mapping->host;
	struct buffer_head *bh;
	int err = 0;

	blocksize = inode->i_sb->s_blocksize;
	length = blocksize - (offset & (blocksize - 1));
	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

	/*
	 * For "nobh" option,  we can only work if we don't need to
	 * read-in the page - otherwise we create buffers to do the IO.
	 */
	if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) &&
	     ext3_should_writeback_data(inode) && PageUptodate(page)) {
		zero_user(page, offset, length);
		set_page_dirty(page);
		goto unlock;
	}

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	/* Find the buffer that contains "offset" */
	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	err = 0;
	if (buffer_freed(bh)) {
		BUFFER_TRACE(bh, "freed: skip");
		goto unlock;
	}

	if (!buffer_mapped(bh)) {
		BUFFER_TRACE(bh, "unmapped");
		ext3_get_block(inode, iblock, bh, 0);
		/* unmapped? It's a hole - nothing to do */
		if (!buffer_mapped(bh)) {
			BUFFER_TRACE(bh, "still unmapped");
			goto unlock;
		}
	}

	/* Ok, it's mapped. Make sure it's up-to-date */
	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh)) {
		err = -EIO;
		ll_rw_block(READ, 1, &bh);
		wait_on_buffer(bh);
		/* Uhhuh. Read error. Complain and punt. */
		if (!buffer_uptodate(bh))
			goto unlock;
	}

	/* data=journal: the zeroed range is metadata, get journal access */
	if (ext3_should_journal_data(inode)) {
		BUFFER_TRACE(bh, "get write access");
		err = ext3_journal_get_write_access(handle, bh);
		if (err)
			goto unlock;
	}

	zero_user(page, offset, length);
	BUFFER_TRACE(bh, "zeroed end of block");

	err = 0;
	if (ext3_should_journal_data(inode)) {
		err = ext3_journal_dirty_metadata(handle, bh);
	} else {
		if (ext3_should_order_data(inode))
			err = ext3_journal_dirty_data(handle, bh);
		mark_buffer_dirty(bh);
	}

unlock:
	unlock_page(page);
	page_cache_release(page);
	return err;
}
1934
1935
1936
1937
1938
1939
1940static inline int all_zeroes(__le32 *p, __le32 *q)
1941{
1942 while (p < q)
1943 if (*p++)
1944 return 0;
1945 return 1;
1946}
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
/**
 *	ext3_find_shared - find the indirect blocks for partial truncation.
 *	@inode:	  inode in question
 *	@depth:	  depth of the affected branch
 *	@offsets: offsets of pointers in that branch (see ext3_block_to_path)
 *	@chain:	  place to store the pointers to partial indirect blocks
 *	@top:	  place to store the (detached) top of the branch, if any
 *
 *	Helper for ext3_truncate().  Walks down the chain of indirect blocks
 *	covering the boundary of the truncate, finds the topmost block that
 *	is *not* shared with surviving data, detaches it from its parent and
 *	returns a pointer into @chain marking where the unshared subtree
 *	begins.  *@top receives the detached subtree's root block number
 *	(0 if nothing was detached).
 */
static Indirect *ext3_find_shared(struct inode *inode, int depth,
			int offsets[4], Indirect chain[4], __le32 *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	/* Make k index the deepest non-null offset + 1 */
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = ext3_get_branch(inode, k, offsets, chain, &err);
	/* Writer: pointers */
	if (!partial)
		partial = chain + k-1;
	/*
	 * If the branch acquired continuation since we've looked at it -
	 * fine, it should all survive and (new) top doesn't belong to us.
	 */
	if (!partial->key && *partial->p)
		/* Writer: end */
		goto no_top;
	/* Walk up past levels whose remaining pointers are all zero. */
	for (p=partial; p>chain && all_zeroes((__le32*)p->bh->b_data,p->p); p--)
		;
	/*
	 * OK, we've found the last block that must survive. The rest of our
	 * branch should be detached before unlocking. However, if that rest
	 * of branch is all ours and does not grow immediately from the inode
	 * it's easier to cheat and just decrement partial->p.
	 */
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
		/* Nope, don't do this in ext3.  Must leave the tree intact */
	}
	/* Writer: end */

	/* Drop references to the levels below the detach point. */
	while(partial > p) {
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}
2030
2031
2032
2033
2034
2035
2036
2037
2038
/*
 * Zero a number of block pointers in either an inode or an indirect block.
 * If we restart the transaction we must again get write access to the
 * indirect block for further modification.
 *
 * We release `count' blocks on disk, but (last - first) may be greater
 * than `count' because there can be holes in there.
 */
static void ext3_clear_blocks(handle_t *handle, struct inode *inode,
		struct buffer_head *bh, ext3_fsblk_t block_to_free,
		unsigned long count, __le32 *first, __le32 *last)
{
	__le32 *p;
	if (try_to_extend_transaction(handle, inode)) {
		/* Out of credits: flush dirty state, restart, re-get access. */
		if (bh) {
			BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
			ext3_journal_dirty_metadata(handle, bh);
		}
		ext3_mark_inode_dirty(handle, inode);
		ext3_journal_test_restart(handle, inode);
		if (bh) {
			BUFFER_TRACE(bh, "retaking write access");
			ext3_journal_get_write_access(handle, bh);
		}
	}

	/*
	 * Any buffers which are on the journal will be in memory. We find
	 * them on the hash table so journal_revoke() will run journal_forget()
	 * on them.  We've already detached each block from the file, so
	 * bforget() in journal_forget() should be safe.
	 */
	for (p = first; p < last; p++) {
		u32 nr = le32_to_cpu(*p);
		if (nr) {
			struct buffer_head *bh;

			*p = 0;
			bh = sb_find_get_block(inode->i_sb, nr);
			ext3_forget(handle, 0, inode, bh, nr);
		}
	}

	ext3_free_blocks(handle, inode, block_to_free, count);
}
2078
2079
2080
2081
2082
2083
2084
2085
2086
2087
2088
2089
2090
2091
2092
2093
2094
2095
2096
2097
/**
 * ext3_free_data - free a list of data blocks
 * @handle:	handle for this transaction
 * @inode:	inode we are dealing with
 * @this_bh:	indirect buffer_head which contains *@first and *@last
 * @first:	array of block numbers
 * @last:	points immediately past the end of array
 *
 * We are freeing all blocks referred from that array (numbers are stored as
 * little-endian 32-bit) and updating @inode->i_blocks appropriately.
 *
 * We accumulate contiguous runs of blocks to free.  Conveniently, if these
 * blocks are contiguous, then releasing them at one time will only affect
 * one or two bitmap blocks (+ group descriptor(s) and superblock) and we
 * won't overflow the transaction credits.
 */
static void ext3_free_data(handle_t *handle, struct inode *inode,
			   struct buffer_head *this_bh,
			   __le32 *first, __le32 *last)
{
	ext3_fsblk_t block_to_free = 0;    /* Starting block # of a run */
	unsigned long count = 0;	   /* Number of blocks in the run */
	__le32 *block_to_free_p = NULL;	   /* Pointer into inode/ind
					      corresponding to block_to_free */
	ext3_fsblk_t nr;		   /* Current block # */
	__le32 *p;			   /* Pointer into inode/ind
					      for current block */
	int err;

	if (this_bh) {				/* For indirect block */
		BUFFER_TRACE(this_bh, "get_write_access");
		err = ext3_journal_get_write_access(handle, this_bh);
		/* Important: if we can't update the indirect pointers
		 * to the blocks, we can't free them. */
		if (err)
			return;
	}

	for (p = first; p < last; p++) {
		nr = le32_to_cpu(*p);
		if (nr) {
			/* accumulate blocks to free if they're contiguous */
			if (count == 0) {
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			} else if (nr == block_to_free + count) {
				count++;
			} else {
				ext3_clear_blocks(handle, inode, this_bh,
						  block_to_free,
						  count, block_to_free_p, p);
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			}
		}
	}

	/* Flush the final pending run, if any. */
	if (count > 0)
		ext3_clear_blocks(handle, inode, this_bh, block_to_free,
				  count, block_to_free_p, p);

	if (this_bh) {
		BUFFER_TRACE(this_bh, "call ext3_journal_dirty_metadata");

		/*
		 * The buffer head should have an attached journal head at this
		 * point. However, if the data is corrupted and an indirect
		 * block pointed to itself, it would have been detached when
		 * the block got cleared. Check for this instead of OOPSing.
		 */
		if (bh2jh(this_bh))
			ext3_journal_dirty_metadata(handle, this_bh);
		else
			ext3_error(inode->i_sb, "ext3_free_data",
				   "circular indirect block detected, "
				   "inode=%lu, block=%llu",
				   inode->i_ino,
				   (unsigned long long)this_bh->b_blocknr);
	}
}
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177
2178
/**
 *	ext3_free_branches - free an array of branches
 *	@handle: JBD handle for this transaction
 *	@inode:	inode we are dealing with
 *	@parent_bh: the buffer_head which contains *@first and *@last
 *	@first:	array of block numbers
 *	@last:	pointer immediately past the end of array
 *	@depth:	depth of the branches to free
 *
 *	We are freeing all blocks referred from these branches (numbers are
 *	stored as little-endian 32-bit) and updating @inode->i_blocks
 *	appropriately.  Frees bottom-up, recursing one level of indirection
 *	per call until @depth reaches zero, at which point the pointers are
 *	plain data blocks handled by ext3_free_data().
 */
static void ext3_free_branches(handle_t *handle, struct inode *inode,
			       struct buffer_head *parent_bh,
			       __le32 *first, __le32 *last, int depth)
{
	ext3_fsblk_t nr;
	__le32 *p;

	if (is_handle_aborted(handle))
		return;

	if (depth--) {
		struct buffer_head *bh;
		int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb);
		p = last;
		while (--p >= first) {
			nr = le32_to_cpu(*p);
			if (!nr)
				continue;		/* A hole */

			/* Go read the buffer for the next level down */
			bh = sb_bread(inode->i_sb, nr);

			/*
			 * A read failure? Report error and clear slot
			 * (should be rare).
			 */
			if (!bh) {
				ext3_error(inode->i_sb, "ext3_free_branches",
					   "Read failure, inode=%lu, block="E3FSBLK,
					   inode->i_ino, nr);
				continue;
			}

			/* This zaps the entire block.  Bottom up. */
			BUFFER_TRACE(bh, "free child branches");
			ext3_free_branches(handle, inode, bh,
					   (__le32*)bh->b_data,
					   (__le32*)bh->b_data + addr_per_block,
					   depth);

			/*
			 * We've probably journalled the indirect block several
			 * times during the truncate.  But it's no longer
			 * needed and we now drop it from the transaction via
			 * journal_forget()/journal_revoke() inside
			 * ext3_forget() (is_metadata == 1 here).
			 */
			ext3_forget(handle, 1, inode, bh, bh->b_blocknr);

			/*
			 * Everything below this pointer has been released.
			 * Now let this top-of-subtree go.  Make sure the
			 * transaction still has room; restart it if not.
			 */
			if (is_handle_aborted(handle))
				return;
			if (try_to_extend_transaction(handle, inode)) {
				ext3_mark_inode_dirty(handle, inode);
				ext3_journal_test_restart(handle, inode);
			}

			ext3_free_blocks(handle, inode, nr, 1);

			if (parent_bh) {
				/*
				 * The block which we have just freed is
				 * pointed to by an indirect block: journal
				 * the parent so the cleared pointer reaches
				 * disk atomically with the free.
				 */
				BUFFER_TRACE(parent_bh, "get_write_access");
				if (!ext3_journal_get_write_access(handle,
								   parent_bh)){
					*p = 0;
					BUFFER_TRACE(parent_bh,
						"call ext3_journal_dirty_metadata");
					ext3_journal_dirty_metadata(handle,
								    parent_bh);
				}
			}
		}
	} else {
		/* We've reached the bottom of the tree: plain data blocks. */
		BUFFER_TRACE(parent_bh, "free data blocks");
		ext3_free_data(handle, inode, parent_bh, first, last);
	}
}
2287
2288int ext3_can_truncate(struct inode *inode)
2289{
2290 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
2291 return 0;
2292 if (S_ISREG(inode->i_mode))
2293 return 1;
2294 if (S_ISDIR(inode->i_mode))
2295 return 1;
2296 if (S_ISLNK(inode->i_mode))
2297 return !ext3_inode_is_fast_symlink(inode);
2298 return 0;
2299}
2300
2301
2302
2303
2304
2305
2306
2307
2308
2309
2310
2311
2312
2313
2314
2315
2316
2317
2318
2319
2320
2321
2322
2323
2324
2325
2326
2327
2328
/*
 * ext3_truncate()
 *
 * We block out ext3_get_block() block instantiations across the entire
 * transaction, and VFS/VM ensures that ext3_truncate() cannot run
 * simultaneously on behalf of the same inode.
 *
 * As we work through the truncate and commit bits of it to the journal we
 * add the inode to the orphan list, so that if the truncate spans multiple
 * transactions and we crash, we can roll back at recovery time.
 *
 * The page holding the partially-truncated block (if any) is zeroed first,
 * then the block tree is pruned from last_block upwards, innermost levels
 * first (ext3_find_shared / ext3_free_branches), and finally the whole
 * indirect chains hanging off the inode are released via the fallthrough
 * switch at do_indirects.
 */
void ext3_truncate(struct inode *inode)
{
	handle_t *handle;
	struct ext3_inode_info *ei = EXT3_I(inode);
	__le32 *i_data = ei->i_data;
	int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	int offsets[4];
	Indirect chain[4];
	Indirect *partial;
	__le32 nr = 0;
	int n;
	long last_block;
	unsigned blocksize = inode->i_sb->s_blocksize;
	struct page *page;

	if (!ext3_can_truncate(inode))
		return;

	/*
	 * We have to lock the EOF page here, because lock_page() nests
	 * outside journal_start().
	 */
	if ((inode->i_size & (blocksize - 1)) == 0) {
		/* Block boundary? Nothing to do */
		page = NULL;
	} else {
		page = grab_cache_page(mapping,
				inode->i_size >> PAGE_CACHE_SHIFT);
		if (!page)
			return;
	}

	handle = start_transaction(inode);
	if (IS_ERR(handle)) {
		if (page) {
			/* Avoid stale data exposure: zero before dropping. */
			clear_highpage(page);
			flush_dcache_page(page);
			unlock_page(page);
			page_cache_release(page);
		}
		return;
	}

	last_block = (inode->i_size + blocksize-1)
					>> EXT3_BLOCK_SIZE_BITS(inode->i_sb);

	if (page)
		ext3_block_truncate_page(handle, page, mapping, inode->i_size);

	n = ext3_block_to_path(inode, last_block, offsets, NULL);
	if (n == 0)
		goto out_stop;	/* error */

	/*
	 * OK.  This truncate is going to happen.  We add the inode to the
	 * orphan list, so that if this truncate spans multiple transactions,
	 * and we crash, we will resume the truncate when the filesystem
	 * recovers.  It also marks the inode dirty, to catch the new size.
	 *
	 * Implication: the file must always be in a sane, consistent
	 * truncatable state while each transaction commits.
	 */
	if (ext3_orphan_add(handle, inode))
		goto out_stop;

	/*
	 * The orphan list entry will now protect us from any crash which
	 * occurs before the truncate completes, so it is now safe to
	 * propagate the new i_size into the on-disk inode.
	 */
	ei->i_disksize = inode->i_size;

	/*
	 * From here we block out all ext3_get_block() callers who want to
	 * modify the block allocation tree.
	 */
	mutex_lock(&ei->truncate_mutex);

	if (n == 1) {		/* direct blocks */
		ext3_free_data(handle, inode, NULL, i_data+offsets[0],
			       i_data + EXT3_NDIR_BLOCKS);
		goto do_indirects;
	}

	partial = ext3_find_shared(inode, n, offsets, chain, &nr);
	/* Kill the top of shared branch (not detached) */
	if (nr) {
		if (partial == chain) {
			/* Shared branch grows from the inode */
			ext3_free_branches(handle, inode, NULL,
					   &nr, &nr+1, (chain+n-1) - partial);
			*partial->p = 0;
			/*
			 * We mark the inode dirty prior to restart,
			 * and prior to stop.  No need for it here.
			 */
		} else {
			/* Shared branch grows from an indirect block */
			BUFFER_TRACE(partial->bh, "get_write_access");
			ext3_free_branches(handle, inode, partial->bh,
					partial->p,
					partial->p+1, (chain+n-1) - partial);
		}
	}
	/* Clear the ends of indirect blocks on the shared branch */
	while (partial > chain) {
		ext3_free_branches(handle, inode, partial->bh, partial->p + 1,
				   (__le32*)partial->bh->b_data+addr_per_block,
				   (chain+n-1) - partial);
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse (partial->bh);
		partial--;
	}
do_indirects:
	/* Kill the remaining (whole) subtrees */
	switch (offsets[0]) {
	default:
		nr = i_data[EXT3_IND_BLOCK];
		if (nr) {
			ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
			i_data[EXT3_IND_BLOCK] = 0;
		}
		/* fall through */
	case EXT3_IND_BLOCK:
		nr = i_data[EXT3_DIND_BLOCK];
		if (nr) {
			ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
			i_data[EXT3_DIND_BLOCK] = 0;
		}
		/* fall through */
	case EXT3_DIND_BLOCK:
		nr = i_data[EXT3_TIND_BLOCK];
		if (nr) {
			ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
			i_data[EXT3_TIND_BLOCK] = 0;
		}
		/* fall through */
	case EXT3_TIND_BLOCK:
		;
	}

	ext3_discard_reservation(inode);

	mutex_unlock(&ei->truncate_mutex);
	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
	ext3_mark_inode_dirty(handle, inode);

	/*
	 * In a multi-transaction truncate, we only make the final transaction
	 * synchronous.
	 */
	if (IS_SYNC(inode))
		handle->h_sync = 1;
out_stop:
	/*
	 * If this was a simple ftruncate(), and the file will remain alive,
	 * then we need to clear up the orphan record which we created above.
	 * However, if this was a real unlink then we were called by
	 * ext3_delete_inode(), and we allow that function to clean up the
	 * orphan info for us.
	 */
	if (inode->i_nlink)
		ext3_orphan_del(handle, inode);

	ext3_journal_stop(handle);
}
2495
2496static ext3_fsblk_t ext3_get_inode_block(struct super_block *sb,
2497 unsigned long ino, struct ext3_iloc *iloc)
2498{
2499 unsigned long block_group;
2500 unsigned long offset;
2501 ext3_fsblk_t block;
2502 struct ext3_group_desc *gdp;
2503
2504 if (!ext3_valid_inum(sb, ino)) {
2505
2506
2507
2508
2509
2510 return 0;
2511 }
2512
2513 block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb);
2514 gdp = ext3_get_group_desc(sb, block_group, NULL);
2515 if (!gdp)
2516 return 0;
2517
2518
2519
2520 offset = ((ino - 1) % EXT3_INODES_PER_GROUP(sb)) *
2521 EXT3_INODE_SIZE(sb);
2522 block = le32_to_cpu(gdp->bg_inode_table) +
2523 (offset >> EXT3_BLOCK_SIZE_BITS(sb));
2524
2525 iloc->block_group = block_group;
2526 iloc->offset = offset & (EXT3_BLOCK_SIZE(sb) - 1);
2527 return block;
2528}
2529
2530
2531
2532
2533
2534
2535
/*
 * __ext3_get_inode_loc - read the block containing @inode into iloc->bh.
 *
 * The caller must have write access against iloc->bh when done with it.
 *
 * @in_mem: if non-zero, the caller asserts that all of the inode's state
 * lives in memory, so we can skip reading the block from disk *if* we can
 * prove (via the inode bitmap) that no other in-use inode shares the same
 * block.  Otherwise, or if the proof fails, we fall back to a real read.
 */
static int __ext3_get_inode_loc(struct inode *inode,
				struct ext3_iloc *iloc, int in_mem)
{
	ext3_fsblk_t block;
	struct buffer_head *bh;

	block = ext3_get_inode_block(inode->i_sb, inode->i_ino, iloc);
	if (!block)
		return -EIO;

	bh = sb_getblk(inode->i_sb, block);
	if (!bh) {
		ext3_error (inode->i_sb, "ext3_get_inode_loc",
				"unable to read inode block - "
				"inode=%lu, block="E3FSBLK,
				 inode->i_ino, block);
		return -EIO;
	}
	if (!buffer_uptodate(bh)) {
		lock_buffer(bh);

		/*
		 * If the buffer has the write error flag, we have failed
		 * to write out another inode in the same block.  In this
		 * case, we don't have to read the block because we may
		 * read the old inode data successfully.
		 */
		if (buffer_write_io_error(bh) && !buffer_uptodate(bh))
			set_buffer_uptodate(bh);

		if (buffer_uptodate(bh)) {
			/* someone brought it uptodate while we waited */
			unlock_buffer(bh);
			goto has_buffer;
		}

		/*
		 * If we have all information of the inode in memory and this
		 * is the only valid inode in the block, we need not read the
		 * block.
		 */
		if (in_mem) {
			struct buffer_head *bitmap_bh;
			struct ext3_group_desc *desc;
			int inodes_per_buffer;
			int inode_offset, i;
			int block_group;
			int start;

			block_group = (inode->i_ino - 1) /
					EXT3_INODES_PER_GROUP(inode->i_sb);
			inodes_per_buffer = bh->b_size /
				EXT3_INODE_SIZE(inode->i_sb);
			inode_offset = ((inode->i_ino - 1) %
					EXT3_INODES_PER_GROUP(inode->i_sb));
			/* First inode index that shares this buffer. */
			start = inode_offset & ~(inodes_per_buffer - 1);

			/* Is the inode bitmap in cache? */
			desc = ext3_get_group_desc(inode->i_sb,
						block_group, NULL);
			if (!desc)
				goto make_io;

			bitmap_bh = sb_getblk(inode->i_sb,
					le32_to_cpu(desc->bg_inode_bitmap));
			if (!bitmap_bh)
				goto make_io;

			/*
			 * If the inode bitmap isn't in cache then the
			 * optimisation may end up performing two reads instead
			 * of one, so skip it.
			 */
			if (!buffer_uptodate(bitmap_bh)) {
				brelse(bitmap_bh);
				goto make_io;
			}
			for (i = start; i < start + inodes_per_buffer; i++) {
				if (i == inode_offset)
					continue;
				if (ext3_test_bit(i, bitmap_bh->b_data))
					break;
			}
			brelse(bitmap_bh);
			if (i == start + inodes_per_buffer) {
				/* all other inodes are free, so skip I/O */
				memset(bh->b_data, 0, bh->b_size);
				set_buffer_uptodate(bh);
				unlock_buffer(bh);
				goto has_buffer;
			}
		}

make_io:
		/*
		 * There are other valid inodes in the buffer, this inode
		 * has in-inode xattrs, or we don't have this inode in memory.
		 * Read the block from disk.
		 */
		get_bh(bh);
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(READ_META, bh);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh)) {
			ext3_error(inode->i_sb, "ext3_get_inode_loc",
					"unable to read inode block - "
					"inode=%lu, block="E3FSBLK,
					inode->i_ino, block);
			brelse(bh);
			return -EIO;
		}
	}
has_buffer:
	iloc->bh = bh;
	return 0;
}
2652
2653int ext3_get_inode_loc(struct inode *inode, struct ext3_iloc *iloc)
2654{
2655
2656 return __ext3_get_inode_loc(inode, iloc,
2657 !(EXT3_I(inode)->i_state & EXT3_STATE_XATTR));
2658}
2659
2660void ext3_set_inode_flags(struct inode *inode)
2661{
2662 unsigned int flags = EXT3_I(inode)->i_flags;
2663
2664 inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
2665 if (flags & EXT3_SYNC_FL)
2666 inode->i_flags |= S_SYNC;
2667 if (flags & EXT3_APPEND_FL)
2668 inode->i_flags |= S_APPEND;
2669 if (flags & EXT3_IMMUTABLE_FL)
2670 inode->i_flags |= S_IMMUTABLE;
2671 if (flags & EXT3_NOATIME_FL)
2672 inode->i_flags |= S_NOATIME;
2673 if (flags & EXT3_DIRSYNC_FL)
2674 inode->i_flags |= S_DIRSYNC;
2675}
2676
2677
2678void ext3_get_inode_flags(struct ext3_inode_info *ei)
2679{
2680 unsigned int flags = ei->vfs_inode.i_flags;
2681
2682 ei->i_flags &= ~(EXT3_SYNC_FL|EXT3_APPEND_FL|
2683 EXT3_IMMUTABLE_FL|EXT3_NOATIME_FL|EXT3_DIRSYNC_FL);
2684 if (flags & S_SYNC)
2685 ei->i_flags |= EXT3_SYNC_FL;
2686 if (flags & S_APPEND)
2687 ei->i_flags |= EXT3_APPEND_FL;
2688 if (flags & S_IMMUTABLE)
2689 ei->i_flags |= EXT3_IMMUTABLE_FL;
2690 if (flags & S_NOATIME)
2691 ei->i_flags |= EXT3_NOATIME_FL;
2692 if (flags & S_DIRSYNC)
2693 ei->i_flags |= EXT3_DIRSYNC_FL;
2694}
2695
/*
 * ext3_iget - obtain the in-memory inode for (sb, ino), reading and
 * decoding the on-disk ext3_inode if it is not already cached.
 *
 * Returns the inode, or an ERR_PTR on failure (-ENOMEM, -EIO, -ESTALE).
 */
struct inode *ext3_iget(struct super_block *sb, unsigned long ino)
{
	struct ext3_iloc iloc;
	struct ext3_inode *raw_inode;
	struct ext3_inode_info *ei;
	struct buffer_head *bh;
	struct inode *inode;
	long ret;
	int block;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;	/* already cached and initialised */

	ei = EXT3_I(inode);
#ifdef CONFIG_EXT3_FS_POSIX_ACL
	ei->i_acl = EXT3_ACL_NOT_CACHED;
	ei->i_default_acl = EXT3_ACL_NOT_CACHED;
#endif
	ei->i_block_alloc_info = NULL;

	/* in_mem == 0: force a real read of the inode block. */
	ret = __ext3_get_inode_loc(inode, &iloc, 0);
	if (ret < 0)
		goto bad_inode;
	bh = iloc.bh;
	raw_inode = ext3_raw_inode(&iloc);
	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
	inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
	if(!(test_opt (inode->i_sb, NO_UID32))) {
		/* 32-bit ids: combine the high and low 16-bit halves. */
		inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
		inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
	}
	inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
	inode->i_size = le32_to_cpu(raw_inode->i_size);
	inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime);
	inode->i_ctime.tv_sec = (signed)le32_to_cpu(raw_inode->i_ctime);
	inode->i_mtime.tv_sec = (signed)le32_to_cpu(raw_inode->i_mtime);
	inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec = inode->i_mtime.tv_nsec = 0;

	ei->i_state = 0;
	ei->i_dir_start_lookup = 0;
	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
	/*
	 * NOTE! The in-memory inode i_data array is in little-endian order
	 * even on big-endian machines: we do NOT byteswap the block numbers,
	 * and all valid code tests le32_to_cpu()'d values.
	 */
	if (inode->i_nlink == 0) {
		if (inode->i_mode == 0 ||
		    !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ORPHAN_FS)) {
			/* this inode is deleted */
			brelse (bh);
			ret = -ESTALE;
			goto bad_inode;
		}
		/*
		 * The only unlinked inodes we let through here have
		 * valid i_mode and are being read by the orphan
		 * recovery code: that's fine, we're about to complete
		 * the process of deleting those.
		 */
	}
	inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
#ifdef EXT3_FRAGMENTS
	ei->i_faddr = le32_to_cpu(raw_inode->i_faddr);
	ei->i_frag_no = raw_inode->i_frag;
	ei->i_frag_size = raw_inode->i_fsize;
#endif
	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
	if (!S_ISREG(inode->i_mode)) {
		ei->i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl);
	} else {
		/* Regular files reuse the dir_acl field as high size bits. */
		inode->i_size |=
			((__u64)le32_to_cpu(raw_inode->i_size_high)) << 32;
	}
	ei->i_disksize = inode->i_size;
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
	ei->i_block_group = iloc.block_group;
	/*
	 * NOTE! The in-memory inode i_data array is in little-endian order
	 * even on big-endian machines: we do NOT byteswap the block numbers!
	 */
	for (block = 0; block < EXT3_N_BLOCKS; block++)
		ei->i_data[block] = raw_inode->i_block[block];
	INIT_LIST_HEAD(&ei->i_orphan);

	if (inode->i_ino >= EXT3_FIRST_INO(inode->i_sb) + 1 &&
	    EXT3_INODE_SIZE(inode->i_sb) > EXT3_GOOD_OLD_INODE_SIZE) {
		/*
		 * When mke2fs creates big inodes it does not zero out
		 * the unused bytes above EXT3_GOOD_OLD_INODE_SIZE,
		 * so ignore those first few inodes.
		 */
		ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
		if (EXT3_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
		    EXT3_INODE_SIZE(inode->i_sb)) {
			brelse (bh);
			ret = -EIO;
			goto bad_inode;
		}
		if (ei->i_extra_isize == 0) {
			/* The extra space is currently unused. Use it. */
			ei->i_extra_isize = sizeof(struct ext3_inode) -
					    EXT3_GOOD_OLD_INODE_SIZE;
		} else {
			__le32 *magic = (void *)raw_inode +
					EXT3_GOOD_OLD_INODE_SIZE +
					ei->i_extra_isize;
			if (*magic == cpu_to_le32(EXT3_XATTR_MAGIC))
				 ei->i_state |= EXT3_STATE_XATTR;
		}
	} else
		ei->i_extra_isize = 0;

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &ext3_file_inode_operations;
		inode->i_fop = &ext3_file_operations;
		ext3_set_aops(inode);
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &ext3_dir_inode_operations;
		inode->i_fop = &ext3_dir_operations;
	} else if (S_ISLNK(inode->i_mode)) {
		if (ext3_inode_is_fast_symlink(inode))
			inode->i_op = &ext3_fast_symlink_inode_operations;
		else {
			inode->i_op = &ext3_symlink_inode_operations;
			ext3_set_aops(inode);
		}
	} else {
		/* Device nodes, FIFOs, sockets. */
		inode->i_op = &ext3_special_inode_operations;
		if (raw_inode->i_block[0])
			init_special_inode(inode, inode->i_mode,
			   old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
		else
			init_special_inode(inode, inode->i_mode,
			   new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
	}
	brelse (iloc.bh);
	ext3_set_inode_flags(inode);
	unlock_new_inode(inode);
	return inode;

bad_inode:
	iget_failed(inode);
	return ERR_PTR(ret);
}
2844
2845
2846
2847
2848
2849
2850
2851
/*
 * Post the struct inode info into an on-disk inode location in the
 * buffer-cache.  This gobbles the caller's reference to the
 * buffer_head in the inode location struct.
 *
 * The caller must have write access to iloc->bh.
 */
static int ext3_do_update_inode(handle_t *handle,
				struct inode *inode,
				struct ext3_iloc *iloc)
{
	struct ext3_inode *raw_inode = ext3_raw_inode(iloc);
	struct ext3_inode_info *ei = EXT3_I(inode);
	struct buffer_head *bh = iloc->bh;
	int err = 0, rc, block;

	/* For fields not not tracking in the in-memory inode,
	 * initialise them to zero for new inodes. */
	if (ei->i_state & EXT3_STATE_NEW)
		memset(raw_inode, 0, EXT3_SB(inode->i_sb)->s_inode_size);

	ext3_get_inode_flags(ei);
	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	if(!(test_opt(inode->i_sb, NO_UID32))) {
		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid));
		/*
		 * Fix up interoperability with old kernels. Otherwise, old
		 * inodes get re-used with the upper 16 bits of the uid/gid
		 * intact.
		 */
		if(!ei->i_dtime) {
			raw_inode->i_uid_high =
				cpu_to_le16(high_16_bits(inode->i_uid));
			raw_inode->i_gid_high =
				cpu_to_le16(high_16_bits(inode->i_gid));
		} else {
			raw_inode->i_uid_high = 0;
			raw_inode->i_gid_high = 0;
		}
	} else {
		raw_inode->i_uid_low =
			cpu_to_le16(fs_high2lowuid(inode->i_uid));
		raw_inode->i_gid_low =
			cpu_to_le16(fs_high2lowgid(inode->i_gid));
		raw_inode->i_uid_high = 0;
		raw_inode->i_gid_high = 0;
	}
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
	raw_inode->i_size = cpu_to_le32(ei->i_disksize);
	raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
	raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
	raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
	raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
	raw_inode->i_flags = cpu_to_le32(ei->i_flags);
#ifdef EXT3_FRAGMENTS
	raw_inode->i_faddr = cpu_to_le32(ei->i_faddr);
	raw_inode->i_frag = ei->i_frag_no;
	raw_inode->i_fsize = ei->i_frag_size;
#endif
	raw_inode->i_file_acl = cpu_to_le32(ei->i_file_acl);
	if (!S_ISREG(inode->i_mode)) {
		raw_inode->i_dir_acl = cpu_to_le32(ei->i_dir_acl);
	} else {
		raw_inode->i_size_high =
			cpu_to_le32(ei->i_disksize >> 32);
		if (ei->i_disksize > 0x7fffffffULL) {
			struct super_block *sb = inode->i_sb;
			if (!EXT3_HAS_RO_COMPAT_FEATURE(sb,
					EXT3_FEATURE_RO_COMPAT_LARGE_FILE) ||
			    EXT3_SB(sb)->s_es->s_rev_level ==
					cpu_to_le32(EXT3_GOOD_OLD_REV)) {
			       /* If this is the first large file
				* created, add a flag to the superblock.
				*/
				err = ext3_journal_get_write_access(handle,
						EXT3_SB(sb)->s_sbh);
				if (err)
					goto out_brelse;
				ext3_update_dynamic_rev(sb);
				EXT3_SET_RO_COMPAT_FEATURE(sb,
					EXT3_FEATURE_RO_COMPAT_LARGE_FILE);
				sb->s_dirt = 1;
				handle->h_sync = 1;
				err = ext3_journal_dirty_metadata(handle,
						EXT3_SB(sb)->s_sbh);
			}
		}
	}
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (old_valid_dev(inode->i_rdev)) {
			raw_inode->i_block[0] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			raw_inode->i_block[1] = 0;
		} else {
			raw_inode->i_block[0] = 0;
			raw_inode->i_block[1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			raw_inode->i_block[2] = 0;
		}
	} else for (block = 0; block < EXT3_N_BLOCKS; block++)
		raw_inode->i_block[block] = ei->i_data[block];

	if (ei->i_extra_isize)
		raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);

	BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
	rc = ext3_journal_dirty_metadata(handle, bh);
	if (!err)
		err = rc;
	ei->i_state &= ~EXT3_STATE_NEW;

out_brelse:
	brelse (bh);
	ext3_std_error(inode->i_sb, err);
	return err;
}
2963
2964
2965
2966
2967
2968
2969
2970
2971
2972
2973
2974
2975
2976
2977
2978
2979
2980
2981
2982
2983
2984
2985
2986
2987
2988
2989
2990
2991
2992
2993
2994
2995
2996
2997
2998
2999int ext3_write_inode(struct inode *inode, int wait)
3000{
3001 if (current->flags & PF_MEMALLOC)
3002 return 0;
3003
3004 if (ext3_journal_current_handle()) {
3005 jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
3006 dump_stack();
3007 return -EIO;
3008 }
3009
3010 if (!wait)
3011 return 0;
3012
3013 return ext3_force_commit(inode->i_sb);
3014}
3015
3016
3017
3018
3019
3020
3021
3022
3023
3024
3025
3026
3027
3028
3029
3030
3031
3032
/*
 * ext3_setattr()
 *
 * Called from notify_change.
 *
 * We want to trap VFS attempts to truncate the file as soon as
 * possible.  In particular, we want to make sure that when the VFS
 * shrinks i_size, we put the inode on the orphan list and modify
 * i_disksize immediately, so that during the subsequent flushing of
 * dirty pages and freeing of disk blocks, we can guarantee that any
 * commit will leave the blocks being flushed in an unused state on
 * disk.  (On recovery, the inode will get truncated and the blocks will
 * be freed, so we have a strong guarantee that no future commit will
 * leave these blocks visible to the user.)
 *
 * Called with inode->sem down.
 */
int ext3_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error, rc = 0;
	const unsigned int ia_valid = attr->ia_valid;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
		(ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
		handle_t *handle;

		/* (user+group)*(old+new) structure, inode write (sb,
		 * inode block, quota blocks) */
		handle = ext3_journal_start(inode, 2*(EXT3_QUOTA_INIT_BLOCKS(inode->i_sb)+
					EXT3_QUOTA_DEL_BLOCKS(inode->i_sb))+3);
		if (IS_ERR(handle)) {
			error = PTR_ERR(handle);
			goto err_out;
		}
		error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0;
		if (error) {
			ext3_journal_stop(handle);
			return error;
		}
		/* Update corresponding info in inode so that everything is in
		 * one transaction */
		if (attr->ia_valid & ATTR_UID)
			inode->i_uid = attr->ia_uid;
		if (attr->ia_valid & ATTR_GID)
			inode->i_gid = attr->ia_gid;
		error = ext3_mark_inode_dirty(handle, inode);
		ext3_journal_stop(handle);
	}

	if (S_ISREG(inode->i_mode) &&
	    attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) {
		handle_t *handle;

		handle = ext3_journal_start(inode, 3);
		if (IS_ERR(handle)) {
			error = PTR_ERR(handle);
			goto err_out;
		}

		/* Orphan-list the inode so a crash mid-truncate recovers. */
		error = ext3_orphan_add(handle, inode);
		EXT3_I(inode)->i_disksize = attr->ia_size;
		rc = ext3_mark_inode_dirty(handle, inode);
		if (!error)
			error = rc;
		ext3_journal_stop(handle);
	}

	rc = inode_setattr(inode, attr);

	/* If inode_setattr's call to ext3_truncate failed to get a
	 * transaction handle at all, we need to clean up the in-core
	 * orphan list manually. */
	if (inode->i_nlink)
		ext3_orphan_del(NULL, inode);

	if (!rc && (ia_valid & ATTR_MODE))
		rc = ext3_acl_chmod(inode);

err_out:
	ext3_std_error(inode->i_sb, error);
	if (!error)
		error = rc;
	return error;
}
3105
3106
3107
3108
3109
3110
3111
3112
3113
3114
3115
3116
3117
3118
3119
3120
3121
3122
3123
3124
3125
3126
3127
3128
3129
3130
3131
3132
3133
3134static int ext3_writepage_trans_blocks(struct inode *inode)
3135{
3136 int bpp = ext3_journal_blocks_per_page(inode);
3137 int indirects = (EXT3_NDIR_BLOCKS % bpp) ? 5 : 3;
3138 int ret;
3139
3140 if (ext3_should_journal_data(inode))
3141 ret = 3 * (bpp + indirects) + 2;
3142 else
3143 ret = 2 * (bpp + indirects) + 2;
3144
3145#ifdef CONFIG_QUOTA
3146
3147
3148 ret += 2*EXT3_QUOTA_TRANS_BLOCKS(inode->i_sb);
3149#endif
3150
3151 return ret;
3152}
3153
3154
3155
3156
3157
3158int ext3_mark_iloc_dirty(handle_t *handle,
3159 struct inode *inode, struct ext3_iloc *iloc)
3160{
3161 int err = 0;
3162
3163
3164 get_bh(iloc->bh);
3165
3166
3167 err = ext3_do_update_inode(handle, inode, iloc);
3168 put_bh(iloc->bh);
3169 return err;
3170}
3171
3172
3173
3174
3175
3176
3177int
3178ext3_reserve_inode_write(handle_t *handle, struct inode *inode,
3179 struct ext3_iloc *iloc)
3180{
3181 int err = 0;
3182 if (handle) {
3183 err = ext3_get_inode_loc(inode, iloc);
3184 if (!err) {
3185 BUFFER_TRACE(iloc->bh, "get_write_access");
3186 err = ext3_journal_get_write_access(handle, iloc->bh);
3187 if (err) {
3188 brelse(iloc->bh);
3189 iloc->bh = NULL;
3190 }
3191 }
3192 }
3193 ext3_std_error(inode->i_sb, err);
3194 return err;
3195}
3196
3197
3198
3199
3200
3201
3202
3203
3204
3205
3206
3207
3208
3209
3210
3211
3212
3213
3214
3215
3216
3217
3218int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode)
3219{
3220 struct ext3_iloc iloc;
3221 int err;
3222
3223 might_sleep();
3224 err = ext3_reserve_inode_write(handle, inode, &iloc);
3225 if (!err)
3226 err = ext3_mark_iloc_dirty(handle, inode, &iloc);
3227 return err;
3228}
3229
3230
3231
3232
3233
3234
3235
3236
3237
3238
3239
3240
3241
3242
3243
/*
 * ext3_dirty_inode() is called from __mark_inode_dirty()
 *
 * We're really interested in the case where a file is being extended.
 * i_size has been changed by generic_commit_write() and we thus need
 * to include the updated inode in the current transaction.
 *
 * Opens its own short-lived handle rather than relying on any current
 * transaction, because the outer handle (if any) may belong to a
 * transaction that has already committed by the time we run.
 */
void ext3_dirty_inode(struct inode *inode)
{
	handle_t *current_handle = ext3_journal_current_handle();
	handle_t *handle;

	handle = ext3_journal_start(inode, 2);
	if (IS_ERR(handle))
		goto out;
	if (current_handle &&
		current_handle->h_transaction != handle->h_transaction) {
		/* This task has a transaction open against a different fs */
		printk(KERN_EMERG "%s: transactions do not match!\n",
		       __func__);
	} else {
		jbd_debug(5, "marking dirty.  outer handle=%p\n",
				current_handle);
		ext3_mark_inode_dirty(handle, inode);
	}
	ext3_journal_stop(handle);
out:
	return;
}
3266
3267
3268
3269
3270
3271
3272
3273
3274
3275
3276
3277
3278
3279
3280
3281
3282
3283
3284
3285
3286
3287
3288
3289
3290
3291
3292
3293
3294
3295
/*
 * ext3_change_inode_journal_flag - switch an inode into or out of
 * data=journal mode at runtime (val != 0 -> journal data).
 *
 * The inode must not be written to concurrently: the caller is expected
 * to hold the necessary locks so no page writeback, direct I/O or
 * allocation races with the flag flip.  We flush the whole journal so
 * no block is simultaneously "journalled data" and "ordinary data".
 */
int ext3_change_inode_journal_flag(struct inode *inode, int val)
{
	journal_t *journal;
	handle_t *handle;
	int err;

	/*
	 * We have to be very careful here: changing a data block's
	 * journaling status dynamically is dangerous.  If we write a
	 * data block to the journal, change the status and then delete
	 * that block, we risk forgetting to revoke the old log record
	 * from the journal and so a subsequent replay can corrupt data.
	 * So, first we make sure that the journal is empty and that
	 * nobody is changing anything.
	 */

	journal = EXT3_JOURNAL(inode);
	if (is_journal_aborted(journal))
		return -EROFS;

	journal_lock_updates(journal);
	journal_flush(journal);

	/*
	 * OK, there are no updates running now, and all cached data is
	 * synced to disk.  We are now in a completely consistent state
	 * which doesn't have anything in the journal, and we know that
	 * no filesystem updates are running, so it is safe to modify
	 * the inode's state flags and switch the aops table.
	 */

	if (val)
		EXT3_I(inode)->i_flags |= EXT3_JOURNAL_DATA_FL;
	else
		EXT3_I(inode)->i_flags &= ~EXT3_JOURNAL_DATA_FL;
	ext3_set_aops(inode);

	journal_unlock_updates(journal);

	/* Finally we can mark the inode as dirty. */

	handle = ext3_journal_start(inode, 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	err = ext3_mark_inode_dirty(handle, inode);
	handle->h_sync = 1;
	ext3_journal_stop(handle);
	ext3_std_error(inode->i_sb, err);

	return err;
}