1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54#include "ubifs.h"
55#include <linux/mount.h>
56#include <linux/namei.h>
57
/*
 * read_block - read and decompress a single data block of an inode.
 * @inode: inode the block belongs to
 * @addr: destination buffer (at least %UBIFS_BLOCK_SIZE bytes)
 * @block: data block number within the inode
 * @dn: scratch buffer the on-flash data node is read into
 *
 * Looks the block's data node up in the TNC, decompresses it into @addr
 * and zero-pads the tail up to %UBIFS_BLOCK_SIZE. Returns zero on success,
 * %-ENOENT if the block is a hole (in which case @addr is zeroed out),
 * and other negative error codes on failure.
 */
static int read_block(struct inode *inode, void *addr, unsigned int block,
		      struct ubifs_data_node *dn)
{
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	int err, len, out_len;
	union ubifs_key key;
	unsigned int dlen;

	data_key_init(c, &key, inode->i_ino, block);
	err = ubifs_tnc_lookup(c, &key, dn);
	if (err) {
		if (err == -ENOENT)
			/* Not found in the index - the block is a hole */
			memset(addr, 0, UBIFS_BLOCK_SIZE);
		return err;
	}

	/* The data node must have been written after the inode was created */
	ubifs_assert(le64_to_cpu(dn->ch.sqnum) > ubifs_inode(inode)->creat_sqnum);

	len = le32_to_cpu(dn->size);
	if (len <= 0 || len > UBIFS_BLOCK_SIZE)
		goto dump;

	/* Compressed payload length = node length minus the header */
	dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
	out_len = UBIFS_BLOCK_SIZE;
	err = ubifs_decompress(&dn->data, dlen, addr, &out_len,
			       le16_to_cpu(dn->compr_type));
	if (err || len != out_len)
		goto dump;

	/*
	 * The stored data may be shorter than a full block; zero the
	 * remainder so callers always see a complete block.
	 */
	if (len < UBIFS_BLOCK_SIZE)
		memset(addr + len, 0, UBIFS_BLOCK_SIZE - len);

	return 0;

dump:
	ubifs_err("bad data node (block %u, inode %lu)",
		  block, inode->i_ino);
	dbg_dump_node(c, dn);
	return -EINVAL;
}
104
/*
 * do_readpage - read a whole page of data, block by block.
 * @page: locked page to fill
 *
 * Reads each block of @page via 'read_block()'. Blocks beyond the end of
 * the file and missing blocks (holes) are zeroed; a page consisting
 * entirely of holes is additionally marked "checked", which later
 * budgeting code uses to distinguish new pages from existing ones.
 * Returns zero on success (the page is made up-to-date) and a negative
 * error code on failure (the page is marked with an error).
 */
static int do_readpage(struct page *page)
{
	void *addr;
	int err = 0, i;
	unsigned int block, beyond;
	struct ubifs_data_node *dn;
	struct inode *inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);

	dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx",
		inode->i_ino, page->index, i_size, page->flags);
	ubifs_assert(!PageChecked(page));
	ubifs_assert(!PagePrivate(page));

	addr = kmap(page);

	block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
	/* First block index past the end of the file */
	beyond = (i_size + UBIFS_BLOCK_SIZE - 1) >> UBIFS_BLOCK_SHIFT;
	if (block >= beyond) {
		/* Reading beyond the inode size - the whole page is a hole */
		SetPageChecked(page);
		memset(addr, 0, PAGE_CACHE_SIZE);
		goto out;
	}

	dn = kmalloc(UBIFS_MAX_DATA_NODE_SZ, GFP_NOFS);
	if (!dn) {
		err = -ENOMEM;
		goto error;
	}

	i = 0;
	while (1) {
		int ret;

		if (block >= beyond) {
			/* Past the inode size - zero the remaining blocks */
			err = -ENOENT;
			memset(addr, 0, UBIFS_BLOCK_SIZE);
		} else {
			ret = read_block(inode, addr, block, dn);
			if (ret) {
				err = ret;
				/* Holes (-ENOENT) are fine, keep going */
				if (err != -ENOENT)
					break;
			} else if (block + 1 == beyond) {
				/*
				 * Last block of the file - zero out anything
				 * within this block that is past the inode
				 * size.
				 */
				int dlen = le32_to_cpu(dn->size);
				int ilen = i_size & (UBIFS_BLOCK_SIZE - 1);

				if (ilen && ilen < dlen)
					memset(addr + ilen, 0, dlen - ilen);
			}
		}
		if (++i >= UBIFS_BLOCKS_PER_PAGE)
			break;
		block += 1;
		addr += UBIFS_BLOCK_SIZE;
	}
	if (err) {
		if (err == -ENOENT) {
			/* The page consists of holes - not an error */
			SetPageChecked(page);
			dbg_gen("hole");
			goto out_free;
		}
		ubifs_err("cannot read page %lu of inode %lu, error %d",
			  page->index, inode->i_ino, err);
		goto error;
	}

out_free:
	kfree(dn);
out:
	SetPageUptodate(page);
	ClearPageError(page);
	flush_dcache_page(page);
	kunmap(page);
	return 0;

error:
	kfree(dn);
	ClearPageUptodate(page);
	SetPageError(page);
	flush_dcache_page(page);
	kunmap(page);
	return err;
}
192
193
194
195
196
197
198
199
200static void release_new_page_budget(struct ubifs_info *c)
201{
202 struct ubifs_budget_req req = { .recalculate = 1, .new_page = 1 };
203
204 ubifs_release_budget(c, &req);
205}
206
207
208
209
210
211
212
213
214static void release_existing_page_budget(struct ubifs_info *c)
215{
216 struct ubifs_budget_req req = { .dd_growth = c->page_budget};
217
218 ubifs_release_budget(c, &req);
219}
220
/*
 * write_begin_slow - the slow path of 'ubifs_write_begin()'.
 * @mapping: address space the page belongs to
 * @pos: position in the file to start writing to
 * @len: number of bytes which are going to be written
 * @pagep: the resulting locked page is returned here
 *
 * Unlike the fast path, this function budgets *before* locking the page,
 * so the budgeting call may block. Since at that point it is not yet known
 * whether the page is new or already exists, the worst case (a new page)
 * is budgeted, and the surplus is released once the page state is known.
 *
 * Returns zero on success and a negative error code on failure. NOTE: on
 * success, when @pos + @len extends the inode size ("appending"), this
 * function returns with @ui->ui_mutex locked; 'ubifs_write_end()' (or
 * 'cancel_budget()') unlocks it.
 */
static int write_begin_slow(struct address_space *mapping,
			    loff_t pos, unsigned len, struct page **pagep)
{
	struct inode *inode = mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	struct ubifs_budget_req req = { .new_page = 1 };
	int uninitialized_var(err), appending = !!(pos + len > inode->i_size);
	struct page *page;

	dbg_gen("ino %lu, pos %llu, len %u, i_size %lld",
		inode->i_ino, pos, len, inode->i_size);

	/*
	 * Budget for the worst case - a new page. If the page turns out to
	 * be dirty or already existing, the extra budget is released below.
	 */
	if (appending)
		/* We are appending - budget for the inode change as well */
		req.dirtied_ino = 1;

	err = ubifs_budget_space(c, &req);
	if (unlikely(err))
		return err;

	page = __grab_cache_page(mapping, index);
	if (unlikely(!page)) {
		ubifs_release_budget(c, &req);
		return -ENOMEM;
	}

	if (!PageUptodate(page)) {
		/* A full-page overwrite needs no reading from the media */
		if (!(pos & PAGE_CACHE_MASK) && len == PAGE_CACHE_SIZE)
			SetPageChecked(page);
		else {
			err = do_readpage(page);
			if (err) {
				unlock_page(page);
				page_cache_release(page);
				return err;
			}
		}

		SetPageUptodate(page);
		ClearPageError(page);
	}

	if (PagePrivate(page))
		/*
		 * The page is dirty, which means it was budgeted already by
		 * whoever dirtied it and set the private flag. Our "new page"
		 * budget from above is therefore a duplicate - release it.
		 */
		release_new_page_budget(c);
	else if (!PageChecked(page))
		/*
		 * The page is clean and was read from the media, so it
		 * already exists there: turn the "new page" budget into a
		 * cheaper "changed page" budget.
		 */
		ubifs_convert_page_budget(c);

	if (appending) {
		struct ubifs_inode *ui = ubifs_inode(inode);

		/*
		 * Take 'ui_mutex' for the coming inode size update; it is
		 * kept locked until 'ubifs_write_end()'.
		 */
		mutex_lock(&ui->ui_mutex);
		if (ui->dirty)
			/*
			 * The inode is already dirty, so its "dirtied inode"
			 * budget was allocated before - drop our duplicate.
			 */
			ubifs_release_dirty_inode_budget(c, ui);
	}

	*pagep = page;
	return 0;
}
313
314
315
316
317
318
319
320
321
322
323
324
325
326
/*
 * allocate_budget - fast, non-blocking budgeting for 'ubifs_write_begin()'.
 * @c: UBIFS file-system description object
 * @page: page the write is going to touch
 * @ui: UBIFS inode object the page belongs to
 * @appending: non-zero if the write extends the inode size
 *
 * Budgets with '.fast = 1' so that the request fails instead of blocking
 * when space is tight; the caller then falls back to 'write_begin_slow()'.
 *
 * Returns zero on success and a negative error code otherwise. NOTE: on
 * success, when @appending is non-zero, this function returns with
 * @ui->ui_mutex locked (released later in 'ubifs_write_end()' or
 * 'cancel_budget()').
 */
static int allocate_budget(struct ubifs_info *c, struct page *page,
			   struct ubifs_inode *ui, int appending)
{
	struct ubifs_budget_req req = { .fast = 1 };

	if (PagePrivate(page)) {
		if (!appending)
			/*
			 * The page is dirty (already budgeted) and we are not
			 * extending the file - no budget needed at all.
			 */
			return 0;

		mutex_lock(&ui->ui_mutex);
		if (ui->dirty)
			/*
			 * The page is dirty and the inode is dirty too, which
			 * means both were budgeted before - nothing to do,
			 * but keep 'ui_mutex' locked for the appending case.
			 */
			return 0;

		/*
		 * The page is dirty but the inode is clean - budget only for
		 * dirtying the inode.
		 */
		req.dirtied_ino = 1;
	} else {
		if (PageChecked(page))
			/*
			 * A "checked" clean page (a hole or full overwrite)
			 * does not exist on the media - budget a new page.
			 */
			req.new_page = 1;
		else
			/*
			 * The page exists on the media - budget only the
			 * cheaper "changed page" case.
			 */
			req.dirtied_page = 1;

		if (appending) {
			mutex_lock(&ui->ui_mutex);
			if (!ui->dirty)
				/*
				 * The inode is clean, so it will be dirtied
				 * by the size change - budget for that too.
				 */
				req.dirtied_ino = 1;
		}
	}

	return ubifs_budget_space(c, &req);
}
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
/*
 * ubifs_write_begin - prepare a page for writing ('.write_begin' operation).
 * @file: file the write is for
 * @mapping: address space of the file
 * @pos: position in the file to start writing to
 * @len: number of bytes to be written
 * @flags: write flags (unused here)
 * @pagep: the resulting locked page is returned here
 * @fsdata: unused
 *
 * Fast path: grab the page first and try a non-blocking budget allocation
 * ('allocate_budget()'). If that fails with %-ENOSPC, drop the page and
 * fall back to 'write_begin_slow()', which budgets before locking the page
 * and may block. On success, when @pos + @len extends the inode size, this
 * function returns with the inode's ui_mutex locked - 'ubifs_write_end()'
 * releases it.
 */
static int ubifs_write_begin(struct file *file, struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned flags,
			     struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	struct ubifs_inode *ui = ubifs_inode(inode);
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	int uninitialized_var(err), appending = !!(pos + len > inode->i_size);
	struct page *page;

	/* The in-memory and VFS views of the size must agree here */
	ubifs_assert(ubifs_inode(inode)->ui_size == inode->i_size);

	if (unlikely(c->ro_media))
		return -EROFS;

	/* Try out the fast-path part first */
	page = __grab_cache_page(mapping, index);
	if (unlikely(!page))
		return -ENOMEM;

	if (!PageUptodate(page)) {
		/* The page is not loaded from the flash */
		if (!(pos & PAGE_CACHE_MASK) && len == PAGE_CACHE_SIZE)
			/*
			 * A full-page write - no need to read the old
			 * contents; mark the page "checked" so budgeting
			 * knows it may be a new page.
			 */
			SetPageChecked(page);
		else {
			err = do_readpage(page);
			if (err) {
				unlock_page(page);
				page_cache_release(page);
				return err;
			}
		}

		SetPageUptodate(page);
		ClearPageError(page);
	}

	err = allocate_budget(c, page, ui, appending);
	if (unlikely(err)) {
		ubifs_assert(err == -ENOSPC);
		/*
		 * No free space for the fast request - release everything and
		 * retry via the slow path, which budgets before locking the
		 * page and so is allowed to block on budgeting.
		 */
		if (appending) {
			ubifs_assert(mutex_is_locked(&ui->ui_mutex));
			mutex_unlock(&ui->ui_mutex);
		}
		unlock_page(page);
		page_cache_release(page);

		return write_begin_slow(mapping, pos, len, pagep);
	}

	/*
	 * Budget acquired; if appending, 'ui->ui_mutex' is held until
	 * 'ubifs_write_end()'.
	 */
	*pagep = page;
	return 0;

}
499
500
501
502
503
504
505
506
507
508
509
/*
 * cancel_budget - cancel the budget taken by 'ubifs_write_begin()'.
 * @c: UBIFS file-system description object
 * @page: page the budget was allocated for
 * @ui: UBIFS inode object
 * @appending: non-zero if the write was going to extend the inode size
 *
 * Used when the write did not actually happen (e.g. a short copy from
 * user space). Releases whatever budget 'ubifs_write_begin()' allocated
 * and drops @ui->ui_mutex, which was left locked in the appending case.
 */
static void cancel_budget(struct ubifs_info *c, struct page *page,
			  struct ubifs_inode *ui, int appending)
{
	if (appending) {
		if (!ui->dirty)
			/* The "dirtied inode" budget was ours - give it back */
			ubifs_release_dirty_inode_budget(c, ui);
		mutex_unlock(&ui->ui_mutex);
	}
	if (!PagePrivate(page)) {
		/* The page was budgeted by us, not by an earlier dirtier */
		if (PageChecked(page))
			release_new_page_budget(c);
		else
			release_existing_page_budget(c);
	}
}
525
/*
 * ubifs_write_end - finish a write ('.write_end' operation).
 * @file: file the write is for
 * @mapping: address space of the file
 * @pos: position the write started at
 * @len: number of bytes that were meant to be written
 * @copied: number of bytes actually copied from user space
 * @page: the page prepared by 'ubifs_write_begin()'
 * @fsdata: unused
 *
 * Marks the page dirty/private, updates the inode size when appending,
 * and releases the ui_mutex taken in 'ubifs_write_begin()'. Returns the
 * number of bytes consumed (the caller retries with the remainder).
 */
static int ubifs_write_end(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	struct ubifs_inode *ui = ubifs_inode(inode);
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	loff_t end_pos = pos + len;
	int appending = !!(end_pos > inode->i_size);

	dbg_gen("ino %lu, pos %llu, pg %lu, len %u, copied %d, i_size %lld",
		inode->i_ino, pos, page->index, len, copied, inode->i_size);

	if (unlikely(copied < len && len == PAGE_CACHE_SIZE)) {
		/*
		 * A whole-page write was requested but user space supplied
		 * less than a page. 'ubifs_write_begin()' skipped reading
		 * the page in this case (it marked it "checked" instead),
		 * so part of the page now holds garbage. Cancel the budget,
		 * read the page in, and return 0 (via do_readpage's success
		 * value) so the caller faults the data in and repeats.
		 */
		dbg_gen("copied %d instead of %d, read page and repeat",
			copied, len);
		cancel_budget(c, page, ui, appending);

		/*
		 * 'copied' becomes do_readpage()'s return value: 0 on
		 * success, or a negative error code which is propagated.
		 */
		copied = do_readpage(page);
		goto out;
	}

	if (!PagePrivate(page)) {
		/* First time this page becomes dirty - account for it */
		SetPagePrivate(page);
		atomic_long_inc(&c->dirty_pg_cnt);
		__set_page_dirty_nobuffers(page);
	}

	if (appending) {
		i_size_write(inode, end_pos);
		ui->ui_size = end_pos;
		/*
		 * The inode size changed, so mark it dirty with DATASYNC set
		 * (the budget for this was taken in 'ubifs_write_begin()'),
		 * then drop the ui_mutex held since write_begin.
		 */
		__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
		ubifs_assert(mutex_is_locked(&ui->ui_mutex));
		mutex_unlock(&ui->ui_mutex);
	}

out:
	unlock_page(page);
	page_cache_release(page);
	return copied;
}
585
586
587
588
589
590
591
592
593
594
/*
 * populate_page - fill a page using data nodes gathered by bulk-read.
 * @c: UBIFS file-system description object
 * @page: page to fill
 * @bu: bulk-read information (keys and buffered data nodes)
 * @n: in/out index of the next zbranch in @bu to consume
 *
 * Decompresses the matching data nodes from @bu->buf into @page,
 * zero-filling blocks that have no data node (holes) and blocks beyond
 * the end of the file. Returns zero on success and %-EINVAL when a bad
 * data node is encountered.
 */
static int populate_page(struct ubifs_info *c, struct page *page,
			 struct bu_info *bu, int *n)
{
	int i = 0, nn = *n, offs = bu->zbranch[0].offs, hole = 0, read = 0;
	struct inode *inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	unsigned int page_block;
	void *addr, *zaddr;
	pgoff_t end_index;

	dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx",
		inode->i_ino, page->index, i_size, page->flags);

	addr = zaddr = kmap(page);

	end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
	if (!i_size || page->index > end_index) {
		/* The whole page lies past the end of the file */
		hole = 1;
		memset(addr, 0, PAGE_CACHE_SIZE);
		goto out_hole;
	}

	page_block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
	while (1) {
		int err, len, out_len, dlen;

		if (nn >= bu->cnt) {
			/* Ran out of data nodes - the rest is a hole */
			hole = 1;
			memset(addr, 0, UBIFS_BLOCK_SIZE);
		} else if (key_block(c, &bu->zbranch[nn].key) == page_block) {
			struct ubifs_data_node *dn;

			/* Data node for this block is in the bulk buffer */
			dn = bu->buf + (bu->zbranch[nn].offs - offs);

			ubifs_assert(le64_to_cpu(dn->ch.sqnum) >
				     ubifs_inode(inode)->creat_sqnum);

			len = le32_to_cpu(dn->size);
			if (len <= 0 || len > UBIFS_BLOCK_SIZE)
				goto out_err;

			dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
			out_len = UBIFS_BLOCK_SIZE;
			err = ubifs_decompress(&dn->data, dlen, addr, &out_len,
					       le16_to_cpu(dn->compr_type));
			if (err || len != out_len)
				goto out_err;

			/* Zero-pad a short block */
			if (len < UBIFS_BLOCK_SIZE)
				memset(addr + len, 0, UBIFS_BLOCK_SIZE - len);

			nn += 1;
			/* Bytes of real data placed into the page so far */
			read = (i << UBIFS_BLOCK_SHIFT) + len;
		} else if (key_block(c, &bu->zbranch[nn].key) < page_block) {
			/* This data node belongs to an earlier block - skip */
			nn += 1;
			continue;
		} else {
			/* No data node for this block - a hole */
			hole = 1;
			memset(addr, 0, UBIFS_BLOCK_SIZE);
		}
		if (++i >= UBIFS_BLOCKS_PER_PAGE)
			break;
		addr += UBIFS_BLOCK_SIZE;
		page_block += 1;
	}

	if (end_index == page->index) {
		/* Last page of the file - zero anything past the inode size */
		int len = i_size & (PAGE_CACHE_SIZE - 1);

		if (len && len < read)
			memset(zaddr + len, 0, read - len);
	}

out_hole:
	if (hole) {
		SetPageChecked(page);
		dbg_gen("hole");
	}

	SetPageUptodate(page);
	ClearPageError(page);
	flush_dcache_page(page);
	kunmap(page);
	*n = nn;
	return 0;

out_err:
	ClearPageUptodate(page);
	SetPageError(page);
	flush_dcache_page(page);
	kunmap(page);
	ubifs_err("bad data node (block %u, inode %lu)",
		  page_block, inode->i_ino);
	return -EINVAL;
}
690
691
692
693
694
695
696
697
698
/*
 * ubifs_do_bulk_read - do the bulk-read.
 * @c: UBIFS file-system description object
 * @bu: bulk-read information (key is pre-initialized by the caller)
 * @page1: the first (locked) page to read
 *
 * Collects the keys of consecutive data nodes, reads them in one go and
 * populates @page1 plus as many following page-cache pages as the data
 * covers. Returns 1 if @page1 was read and unlocked (bulk-read succeeded)
 * and 0 otherwise (the caller then falls back to a normal read; errors are
 * deliberately swallowed with a warning).
 */
static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu,
			      struct page *page1)
{
	pgoff_t offset = page1->index, end_index;
	struct address_space *mapping = page1->mapping;
	struct inode *inode = mapping->host;
	struct ubifs_inode *ui = ubifs_inode(inode);
	int err, page_idx, page_cnt, ret = 0, n = 0;
	/* If the caller supplied no buffer we allocate (and free) our own */
	int allocate = bu->buf ? 0 : 1;
	loff_t isize;

	err = ubifs_tnc_get_bu_keys(c, bu);
	if (err)
		goto out_warn;

	if (bu->eof) {
		/* Reached the end of the file - turn bulk-read back off */
		ui->read_in_a_row = 1;
		ui->bulk_read = 0;
	}

	page_cnt = bu->blk_cnt >> UBIFS_BLOCKS_PER_PAGE_SHIFT;
	if (!page_cnt) {
		/*
		 * Not even one complete page worth of consecutive blocks was
		 * found - bulk-read would not help here, so disable it for
		 * now and let the normal read path handle this page.
		 */
		goto out_bu_off;
	}

	if (bu->cnt) {
		if (allocate) {
			/*
			 * Allocate a buffer sized to exactly span the found
			 * data nodes (from the first node's offset to the end
			 * of the last one).
			 */
			bu->buf_len = bu->zbranch[bu->cnt - 1].offs +
				      bu->zbranch[bu->cnt - 1].len -
				      bu->zbranch[0].offs;
			ubifs_assert(bu->buf_len > 0);
			ubifs_assert(bu->buf_len <= c->leb_size);
			bu->buf = kmalloc(bu->buf_len, GFP_NOFS | __GFP_NOWARN);
			if (!bu->buf)
				goto out_bu_off;
		}

		err = ubifs_tnc_bulk_read(c, bu);
		if (err)
			goto out_warn;
	}

	err = populate_page(c, page1, bu, &n);
	if (err)
		goto out_warn;

	unlock_page(page1);
	ret = 1;

	isize = i_size_read(inode);
	if (isize == 0)
		goto out_free;
	end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);

	/* Populate the following pages from the same bulk buffer */
	for (page_idx = 1; page_idx < page_cnt; page_idx++) {
		pgoff_t page_offset = offset + page_idx;
		struct page *page;

		if (page_offset > end_index)
			break;
		page = find_or_create_page(mapping, page_offset,
					   GFP_NOFS | __GFP_COLD);
		if (!page)
			break;
		if (!PageUptodate(page))
			err = populate_page(c, page, bu, &n);
		unlock_page(page);
		page_cache_release(page);
		if (err)
			break;
	}

	ui->last_page_read = offset + page_idx - 1;

out_free:
	if (allocate)
		kfree(bu->buf);
	return ret;

out_warn:
	ubifs_warn("ignoring error %d and skipping bulk-read", err);
	goto out_free;

out_bu_off:
	/* Disable bulk-read until sequential reads are detected again */
	ui->read_in_a_row = ui->bulk_read = 0;
	goto out_free;
}
797
798
799
800
801
802
803
804
805
806
/*
 * ubifs_bulk_read - determine whether to bulk-read and do it if so.
 * @page: page to read from
 *
 * Tracks sequential reads per inode ('read_in_a_row') and, once several
 * pages have been read in a row, switches to bulk-read mode. Returns 1 if
 * the page was read via bulk-read (and is unlocked) and 0 if the caller
 * should fall back to the regular 'do_readpage()' path.
 */
static int ubifs_bulk_read(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	struct ubifs_inode *ui = ubifs_inode(inode);
	pgoff_t index = page->index, last_page_read = ui->last_page_read;
	struct bu_info *bu;
	int err = 0, allocated = 0;

	ui->last_page_read = index;
	if (!c->bulk_read)
		return 0;

	/*
	 * Bulk-read is protected by 'ui_mutex'; do not block on it here -
	 * if somebody else holds it, just do a regular read.
	 */
	if (!mutex_trylock(&ui->ui_mutex))
		return 0;

	if (index != last_page_read + 1) {
		/* The read is not sequential - reset the streak counter */
		ui->read_in_a_row = 1;
		if (ui->bulk_read)
			ui->bulk_read = 0;
		goto out_unlock;
	}

	if (!ui->bulk_read) {
		ui->read_in_a_row += 1;
		/* Require at least 3 sequential reads before bulk-reading */
		if (ui->read_in_a_row < 3)
			goto out_unlock;
		/* Three reads in a row, so switch to bulk-read mode */
		ui->bulk_read = 1;
	}

	/*
	 * Prefer the pre-allocated 'c->bu' descriptor; if it is busy,
	 * allocate a temporary one (best-effort, no warning on failure).
	 */
	if (mutex_trylock(&c->bu_mutex))
		bu = &c->bu;
	else {
		bu = kmalloc(sizeof(struct bu_info), GFP_NOFS | __GFP_NOWARN);
		if (!bu)
			goto out_unlock;

		bu->buf = NULL;
		allocated = 1;
	}

	bu->buf_len = c->max_bu_buf_len;
	data_key_init(c, &bu->key, inode->i_ino,
		      page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT);
	err = ubifs_do_bulk_read(c, bu, page);

	if (!allocated)
		mutex_unlock(&c->bu_mutex);
	else
		kfree(bu);

out_unlock:
	mutex_unlock(&ui->ui_mutex);
	return err;
}
872
/*
 * ubifs_readpage - the '.readpage' address-space operation.
 * @file: file the page belongs to (unused)
 * @page: locked page to read
 *
 * Tries the bulk-read path first; when that declines (returns 0), falls
 * back to a plain single-page read. Always returns zero.
 */
static int ubifs_readpage(struct file *file, struct page *page)
{
	if (!ubifs_bulk_read(page)) {
		/* Bulk-read was not used - read and unlock the page here */
		do_readpage(page);
		unlock_page(page);
	}
	return 0;
}
881
882static int do_writepage(struct page *page, int len)
883{
884 int err = 0, i, blen;
885 unsigned int block;
886 void *addr;
887 union ubifs_key key;
888 struct inode *inode = page->mapping->host;
889 struct ubifs_info *c = inode->i_sb->s_fs_info;
890
891#ifdef UBIFS_DEBUG
892 spin_lock(&ui->ui_lock);
893 ubifs_assert(page->index <= ui->synced_i_size << PAGE_CACHE_SIZE);
894 spin_unlock(&ui->ui_lock);
895#endif
896
897
898 set_page_writeback(page);
899
900 addr = kmap(page);
901 block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
902 i = 0;
903 while (len) {
904 blen = min_t(int, len, UBIFS_BLOCK_SIZE);
905 data_key_init(c, &key, inode->i_ino, block);
906 err = ubifs_jnl_write_data(c, inode, &key, addr, blen);
907 if (err)
908 break;
909 if (++i >= UBIFS_BLOCKS_PER_PAGE)
910 break;
911 block += 1;
912 addr += blen;
913 len -= blen;
914 }
915 if (err) {
916 SetPageError(page);
917 ubifs_err("cannot write page %lu of inode %lu, error %d",
918 page->index, inode->i_ino, err);
919 ubifs_ro_mode(c, err);
920 }
921
922 ubifs_assert(PagePrivate(page));
923 if (PageChecked(page))
924 release_new_page_budget(c);
925 else
926 release_existing_page_budget(c);
927
928 atomic_long_dec(&c->dirty_pg_cnt);
929 ClearPagePrivate(page);
930 ClearPageChecked(page);
931
932 kunmap(page);
933 unlock_page(page);
934 end_page_writeback(page);
935 return err;
936}
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
/*
 * ubifs_writepage - the '.writepage' address-space operation.
 * @page: locked dirty page to write
 * @wbc: writeback control (unused here)
 *
 * Writes the page through the journal. If the page lies at or beyond the
 * last-synchronized inode size ('synced_i_size'), the inode itself is
 * written out first, so that after an unclean reboot the on-media inode
 * size never lags behind already-written data for this page. The part of
 * the last page beyond the inode size is zeroed before writing.
 */
static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct ubifs_inode *ui = ubifs_inode(inode);
	loff_t i_size = i_size_read(inode), synced_i_size;
	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	int err, len = i_size & (PAGE_CACHE_SIZE - 1);
	void *kaddr;

	dbg_gen("ino %lu, pg %lu, pg flags %#lx",
		inode->i_ino, page->index, page->flags);
	ubifs_assert(PagePrivate(page));

	/* Is the page fully outside @i_size? (truncate in progress) */
	if (page->index > end_index || (page->index == end_index && !len)) {
		err = 0;
		goto out_unlock;
	}

	spin_lock(&ui->ui_lock);
	synced_i_size = ui->synced_i_size;
	spin_unlock(&ui->ui_lock);

	/* Is the page fully inside @i_size? */
	if (page->index < end_index) {
		if (page->index >= synced_i_size >> PAGE_CACHE_SHIFT) {
			/*
			 * The page is past the synced size - synchronize the
			 * inode first so its size reaches the media before
			 * the data does.
			 */
			err = inode->i_sb->s_op->write_inode(inode, 1);
			if (err)
				goto out_unlock;
		}
		return do_writepage(page, PAGE_CACHE_SIZE);
	}

	/*
	 * The page straddles @i_size. It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped: user data past the
	 * inode size must not leak to the media.
	 */
	kaddr = kmap_atomic(page, KM_USER0);
	memset(kaddr + len, 0, PAGE_CACHE_SIZE - len);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);

	if (i_size > synced_i_size) {
		/* Same ordering requirement as above for the last page */
		err = inode->i_sb->s_op->write_inode(inode, 1);
		if (err)
			goto out_unlock;
	}

	return do_writepage(page, len);

out_unlock:
	unlock_page(page);
	return err;
}
1045
1046
1047
1048
1049
1050
1051static void do_attr_changes(struct inode *inode, const struct iattr *attr)
1052{
1053 if (attr->ia_valid & ATTR_UID)
1054 inode->i_uid = attr->ia_uid;
1055 if (attr->ia_valid & ATTR_GID)
1056 inode->i_gid = attr->ia_gid;
1057 if (attr->ia_valid & ATTR_ATIME)
1058 inode->i_atime = timespec_trunc(attr->ia_atime,
1059 inode->i_sb->s_time_gran);
1060 if (attr->ia_valid & ATTR_MTIME)
1061 inode->i_mtime = timespec_trunc(attr->ia_mtime,
1062 inode->i_sb->s_time_gran);
1063 if (attr->ia_valid & ATTR_CTIME)
1064 inode->i_ctime = timespec_trunc(attr->ia_ctime,
1065 inode->i_sb->s_time_gran);
1066 if (attr->ia_valid & ATTR_MODE) {
1067 umode_t mode = attr->ia_mode;
1068
1069 if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
1070 mode &= ~S_ISGID;
1071 inode->i_mode = mode;
1072 }
1073}
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
/*
 * do_truncation - truncate an inode to a smaller size.
 * @c: UBIFS file-system description object
 * @inode: inode to truncate
 * @attr: attribute changes (contains the new size in @attr->ia_size)
 *
 * Budgets for the truncation, shrinks the page cache via 'vmtruncate()',
 * flushes a dirty partially-truncated last page if there is one, and then
 * writes a truncation node through the journal. Truncation to size zero
 * is allowed to proceed even when budgeting fails with %-ENOSPC, since it
 * frees space. Returns zero on success and a negative error code on
 * failure.
 */
static int do_truncation(struct ubifs_info *c, struct inode *inode,
			 const struct iattr *attr)
{
	int err;
	struct ubifs_budget_req req;
	loff_t old_size = inode->i_size, new_size = attr->ia_size;
	int offset = new_size & (UBIFS_BLOCK_SIZE - 1), budgeted = 1;
	struct ubifs_inode *ui = ubifs_inode(inode);

	dbg_gen("ino %lu, size %lld -> %lld", inode->i_ino, old_size, new_size);
	memset(&req, 0, sizeof(struct ubifs_budget_req));

	/*
	 * If the new size lies in the middle of a block, the last block
	 * becomes a changed (dirtied) page as well.
	 */
	if (new_size & (UBIFS_BLOCK_SIZE - 1))
		req.dirtied_page = 1;

	req.dirtied_ino = 1;
	/* A truncation node is written along with the dirtied inode */
	req.dirtied_ino_d = UBIFS_TRUN_NODE_SZ;
	err = ubifs_budget_space(c, &req);
	if (err) {
		/*
		 * Truncating to zero frees space, so let it through even
		 * without budget; any other case must fail here.
		 */
		if (new_size || err != -ENOSPC)
			return err;
		budgeted = 0;
	}

	err = vmtruncate(inode, new_size);
	if (err)
		goto out_budg;

	if (offset) {
		/* The new size cuts into the middle of a page */
		pgoff_t index = new_size >> PAGE_CACHE_SHIFT;
		struct page *page;

		page = find_lock_page(inode->i_mapping, index);
		if (page) {
			if (PageDirty(page)) {
				/*
				 * The truncated page is dirty - write it out
				 * now so that only the valid prefix of the
				 * block reaches the media. 'do_writepage()'
				 * unlocks and releases the page's budget.
				 */
				ubifs_assert(PagePrivate(page));

				clear_page_dirty_for_io(page);
				if (UBIFS_BLOCKS_PER_PAGE_SHIFT)
					/* Multiple blocks per page - write up
					 * to the in-page offset */
					offset = new_size &
						 (PAGE_CACHE_SIZE - 1);
				err = do_writepage(page, offset);
				page_cache_release(page);
				if (err)
					goto out_budg;
			} else {
				/*
				 * The page is clean - the media already holds
				 * it, and the journal truncation below will
				 * handle the cut.
				 */
				unlock_page(page);
				page_cache_release(page);
			}
		}
	}

	mutex_lock(&ui->ui_mutex);
	ui->ui_size = inode->i_size;
	/* Truncation changes inode [mc]time */
	inode->i_mtime = inode->i_ctime = ubifs_current_time(inode);
	/* The other attributes may be changed at the same time as well */
	do_attr_changes(inode, attr);

	err = ubifs_jnl_truncate(c, inode, old_size, new_size);
	mutex_unlock(&ui->ui_mutex);
out_budg:
	if (budgeted)
		ubifs_release_budget(c, &req);
	else {
		/* We proceeded without budget - re-enable space checks */
		c->nospace = c->nospace_rp = 0;
		smp_wmb();
	}
	return err;
}
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
/*
 * do_setattr - change inode attributes (including growing truncation).
 * @c: UBIFS file-system description object
 * @inode: inode to change attributes of
 * @attr: attribute changes description
 *
 * Handles all 'ubifs_setattr()' cases except truncation to a smaller
 * size. Budgets for a dirtied inode, applies the changes under ui_mutex
 * and marks the inode dirty. Returns zero on success and a negative error
 * code on failure.
 */
static int do_setattr(struct ubifs_info *c, struct inode *inode,
		      const struct iattr *attr)
{
	int err, release;
	loff_t new_size = attr->ia_size;
	struct ubifs_inode *ui = ubifs_inode(inode);
	struct ubifs_budget_req req = { .dirtied_ino = 1,
				.dirtied_ino_d = ALIGN(ui->data_len, 8) };

	err = ubifs_budget_space(c, &req);
	if (err)
		return err;

	if (attr->ia_valid & ATTR_SIZE) {
		dbg_gen("size %lld -> %lld", inode->i_size, new_size);
		err = vmtruncate(inode, new_size);
		if (err)
			goto out;
	}

	mutex_lock(&ui->ui_mutex);
	if (attr->ia_valid & ATTR_SIZE) {
		/* Truncation changes inode [mc]time */
		inode->i_mtime = inode->i_ctime = ubifs_current_time(inode);
		/* 'vmtruncate()' changed @i_size, update @ui_size */
		ui->ui_size = inode->i_size;
	}

	do_attr_changes(inode, attr);

	/*
	 * If the inode was already dirty, its budget was allocated earlier;
	 * in that case release ours after marking it dirty again.
	 */
	release = ui->dirty;
	if (attr->ia_valid & ATTR_SIZE)
		/*
		 * The inode size changed - mark with DATASYNC so an fsync
		 * with datasync set still writes the inode out.
		 */
		__mark_inode_dirty(inode, I_DIRTY_SYNC | I_DIRTY_DATASYNC);
	else
		mark_inode_dirty_sync(inode);
	mutex_unlock(&ui->ui_mutex);

	if (release)
		ubifs_release_budget(c, &req);
	if (IS_SYNC(inode))
		err = inode->i_sb->s_op->write_inode(inode, 1);
	return err;

out:
	ubifs_release_budget(c, &req);
	return err;
}
1244
1245int ubifs_setattr(struct dentry *dentry, struct iattr *attr)
1246{
1247 int err;
1248 struct inode *inode = dentry->d_inode;
1249 struct ubifs_info *c = inode->i_sb->s_fs_info;
1250
1251 dbg_gen("ino %lu, mode %#x, ia_valid %#x",
1252 inode->i_ino, inode->i_mode, attr->ia_valid);
1253 err = inode_change_ok(inode, attr);
1254 if (err)
1255 return err;
1256
1257 err = dbg_check_synced_i_size(inode);
1258 if (err)
1259 return err;
1260
1261 if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size < inode->i_size)
1262
1263 err = do_truncation(c, inode, attr);
1264 else
1265 err = do_setattr(c, inode, attr);
1266
1267 return err;
1268}
1269
1270static void ubifs_invalidatepage(struct page *page, unsigned long offset)
1271{
1272 struct inode *inode = page->mapping->host;
1273 struct ubifs_info *c = inode->i_sb->s_fs_info;
1274
1275 ubifs_assert(PagePrivate(page));
1276 if (offset)
1277
1278 return;
1279
1280 if (PageChecked(page))
1281 release_new_page_budget(c);
1282 else
1283 release_existing_page_budget(c);
1284
1285 atomic_long_dec(&c->dirty_pg_cnt);
1286 ClearPagePrivate(page);
1287 ClearPageChecked(page);
1288}
1289
1290static void *ubifs_follow_link(struct dentry *dentry, struct nameidata *nd)
1291{
1292 struct ubifs_inode *ui = ubifs_inode(dentry->d_inode);
1293
1294 nd_set_link(nd, ui->data);
1295 return NULL;
1296}
1297
1298int ubifs_fsync(struct file *file, struct dentry *dentry, int datasync)
1299{
1300 struct inode *inode = dentry->d_inode;
1301 struct ubifs_info *c = inode->i_sb->s_fs_info;
1302 int err;
1303
1304 dbg_gen("syncing inode %lu", inode->i_ino);
1305
1306
1307
1308
1309
1310 if (!datasync || (inode->i_state & I_DIRTY_DATASYNC)) {
1311 err = inode->i_sb->s_op->write_inode(inode, 1);
1312 if (err)
1313 return err;
1314 }
1315
1316
1317
1318
1319
1320 err = ubifs_sync_wbufs_by_inode(c, inode);
1321 if (err)
1322 return err;
1323
1324 return 0;
1325}
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336static inline int mctime_update_needed(const struct inode *inode,
1337 const struct timespec *now)
1338{
1339 if (!timespec_equal(&inode->i_mtime, now) ||
1340 !timespec_equal(&inode->i_ctime, now))
1341 return 1;
1342 return 0;
1343}
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
/*
 * update_mctime - update mtime and ctime of an inode.
 * @c: UBIFS file-system description object
 * @inode: inode to update
 *
 * Sets both timestamps to the current time if they differ from it,
 * budgeting for the dirtied inode first. Returns zero on success and a
 * negative error code on failure.
 */
static int update_mctime(struct ubifs_info *c, struct inode *inode)
{
	struct timespec now = ubifs_current_time(inode);
	struct ubifs_inode *ui = ubifs_inode(inode);

	if (mctime_update_needed(inode, &now)) {
		int err, release;
		struct ubifs_budget_req req = { .dirtied_ino = 1,
				.dirtied_ino_d = ALIGN(ui->data_len, 8) };

		err = ubifs_budget_space(c, &req);
		if (err)
			return err;

		mutex_lock(&ui->ui_mutex);
		inode->i_mtime = inode->i_ctime = ubifs_current_time(inode);
		/*
		 * If the inode was already dirty, its budget was taken
		 * earlier - release our duplicate after re-marking it dirty.
		 */
		release = ui->dirty;
		mark_inode_dirty_sync(inode);
		mutex_unlock(&ui->ui_mutex);
		if (release)
			ubifs_release_budget(c, &req);
	}

	return 0;
}
1379
1380static ssize_t ubifs_aio_write(struct kiocb *iocb, const struct iovec *iov,
1381 unsigned long nr_segs, loff_t pos)
1382{
1383 int err;
1384 ssize_t ret;
1385 struct inode *inode = iocb->ki_filp->f_mapping->host;
1386 struct ubifs_info *c = inode->i_sb->s_fs_info;
1387
1388 err = update_mctime(c, inode);
1389 if (err)
1390 return err;
1391
1392 ret = generic_file_aio_write(iocb, iov, nr_segs, pos);
1393 if (ret < 0)
1394 return ret;
1395
1396 if (ret > 0 && (IS_SYNC(inode) || iocb->ki_filp->f_flags & O_SYNC)) {
1397 err = ubifs_sync_wbufs_by_inode(c, inode);
1398 if (err)
1399 return err;
1400 }
1401
1402 return ret;
1403}
1404
/*
 * ubifs_set_page_dirty - the '.set_page_dirty' address-space operation.
 * @page: page to mark dirty
 *
 * UBIFS expects every page to be made dirty only via the budgeted write
 * paths in this file, so the page must never already be dirty here - the
 * assertion enforces that.
 */
static int ubifs_set_page_dirty(struct page *page)
{
	int was_dirty;

	was_dirty = __set_page_dirty_nobuffers(page);
	/* Pages must not become dirty behind UBIFS' back */
	ubifs_assert(was_dirty == 0);
	return was_dirty;
}
1417
/*
 * ubifs_releasepage - the '.releasepage' address-space operation.
 * @page: page the VM wants to release
 * @unused_gfp_flags: allocation flags (unused)
 *
 * A private (dirty, budgeted) page should never be handed to us for
 * release - the 'ubifs_assert(0)' flags that as a bug. Pages under
 * writeback are refused (return 0); otherwise the private/checked state
 * is cleared and the page released (return 1).
 */
static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags)
{
	/*
	 * An attempt to release a page under writeback - refuse it.
	 */
	if (PageWriteback(page))
		return 0;
	ubifs_assert(PagePrivate(page));
	/* Reaching this point with a budgeted page is unexpected */
	ubifs_assert(0);
	ClearPagePrivate(page);
	ClearPageChecked(page);
	return 1;
}
1432
1433
1434
1435
1436
/*
 * ubifs_vm_page_mkwrite - notification that an mmapped page is being written.
 * @vma: virtual memory area the page belongs to
 * @page: page about to become writable
 *
 * mmap writes bypass 'ubifs_write_begin()', so this is where the budget
 * for a dirtied mmapped page (and, if needed, the [mc]time inode update)
 * is allocated. The worst case - a new page - is budgeted up front and
 * corrected once the page state is known under the page lock. Returns
 * zero on success and a negative error code on failure.
 */
static int ubifs_vm_page_mkwrite(struct vm_area_struct *vma, struct page *page)
{
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	struct timespec now = ubifs_current_time(inode);
	struct ubifs_budget_req req = { .new_page = 1 };
	int err, update_time;

	dbg_gen("ino %lu, pg %lu, i_size %lld", inode->i_ino, page->index,
		i_size_read(inode));
	ubifs_assert(!(inode->i_sb->s_flags & MS_RDONLY));

	if (unlikely(c->ro_media))
		return -EROFS;

	/*
	 * The budget is allocated before taking the page lock (budgeting may
	 * block), so we cannot yet know whether the page is already dirty.
	 * Budget for the worst case (a new page) and correct below.
	 */
	update_time = mctime_update_needed(inode, &now);
	if (update_time)
		/*
		 * The [mc]time needs updating too - budget for the dirtied
		 * inode in the same request.
		 */
		req.dirtied_ino = 1;

	err = ubifs_budget_space(c, &req);
	if (unlikely(err)) {
		if (err == -ENOSPC)
			ubifs_warn("out of space for mmapped file "
				   "(inode number %lu)", inode->i_ino);
		return err;
	}

	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping ||
		     page_offset(page) > i_size_read(inode))) {
		/* The page got truncated/invalidated while we waited */
		err = -EINVAL;
		goto out_unlock;
	}

	if (PagePrivate(page))
		/* Already dirty and budgeted - release our duplicate */
		release_new_page_budget(c);
	else {
		if (!PageChecked(page))
			/* An existing page - downgrade to "changed" budget */
			ubifs_convert_page_budget(c);
		SetPagePrivate(page);
		atomic_long_inc(&c->dirty_pg_cnt);
		__set_page_dirty_nobuffers(page);
	}

	if (update_time) {
		int release;
		struct ubifs_inode *ui = ubifs_inode(inode);

		mutex_lock(&ui->ui_mutex);
		inode->i_mtime = inode->i_ctime = ubifs_current_time(inode);
		/* A previously-dirty inode was budgeted earlier */
		release = ui->dirty;
		mark_inode_dirty_sync(inode);
		mutex_unlock(&ui->ui_mutex);
		if (release)
			ubifs_release_dirty_inode_budget(c, ui);
	}

	unlock_page(page);
	return 0;

out_unlock:
	unlock_page(page);
	ubifs_release_budget(c, &req);
	return err;
}
1525
/* VM operations for mmapped UBIFS files */
static struct vm_operations_struct ubifs_file_vm_ops = {
	.fault = filemap_fault,
	.page_mkwrite = ubifs_vm_page_mkwrite,
};
1530
1531static int ubifs_file_mmap(struct file *file, struct vm_area_struct *vma)
1532{
1533 int err;
1534
1535
1536 err = generic_file_mmap(file, vma);
1537 if (err)
1538 return err;
1539 vma->vm_ops = &ubifs_file_vm_ops;
1540 return 0;
1541}
1542
/* Address-space operations for regular UBIFS files */
struct address_space_operations ubifs_file_address_operations = {
	.readpage = ubifs_readpage,
	.writepage = ubifs_writepage,
	.write_begin = ubifs_write_begin,
	.write_end = ubifs_write_end,
	.invalidatepage = ubifs_invalidatepage,
	.set_page_dirty = ubifs_set_page_dirty,
	.releasepage = ubifs_releasepage,
};
1552
/* Inode operations for regular UBIFS files */
struct inode_operations ubifs_file_inode_operations = {
	.setattr = ubifs_setattr,
	.getattr = ubifs_getattr,
#ifdef CONFIG_UBIFS_FS_XATTR
	.setxattr = ubifs_setxattr,
	.getxattr = ubifs_getxattr,
	.listxattr = ubifs_listxattr,
	.removexattr = ubifs_removexattr,
#endif
};
1563
/* Inode operations for UBIFS symbolic links */
struct inode_operations ubifs_symlink_inode_operations = {
	.readlink = generic_readlink,
	.follow_link = ubifs_follow_link,
	.setattr = ubifs_setattr,
	.getattr = ubifs_getattr,
};
1570
/* File operations for regular UBIFS files */
struct file_operations ubifs_file_operations = {
	.llseek = generic_file_llseek,
	.read = do_sync_read,
	.write = do_sync_write,
	.aio_read = generic_file_aio_read,
	.aio_write = ubifs_aio_write,
	.mmap = ubifs_file_mmap,
	.fsync = ubifs_fsync,
	.unlocked_ioctl = ubifs_ioctl,
	.splice_read = generic_file_splice_read,
	.splice_write = generic_file_splice_write,
#ifdef CONFIG_COMPAT
	.compat_ioctl = ubifs_compat_ioctl,
#endif
};