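/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Reverse-mapping code used by page reclaim: the anon functions walk the
 * anon_vma lists attached to anonymous pages, and the file functions walk
 * the address_space prio tree (and nonlinear list) for pages belonging to
 * an inode.
 *
 * A rough sketch of the lock nesting used below, outermost first
 * (reconstructed from the code in this file, so treat it as a guide rather
 * than the authoritative ordering):
 *
 *   mm->mmap_sem
 *     page lock (PG_locked)
 *       mapping->i_mmap_lock
 *         anon_vma->lock
 *           mm->page_table_lock or pte lock
 *             mmlist_lock (in try_to_unmap_one)
 */
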
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>

#include <asm/tlbflush.h>

#include "internal.h"

static struct kmem_cache *anon_vma_cachep;

static inline struct anon_vma *anon_vma_alloc(void)
{
	return kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
}

static inline void anon_vma_free(struct anon_vma *anon_vma)
{
	kmem_cache_free(anon_vma_cachep, anon_vma);
}
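
/*
 * anon_vma_prepare - make sure @vma has an anon_vma attached.
 *
 * Called before the first anonymous page is faulted into a vma.  It first
 * tries to share an anon_vma with a mergeable neighbour via
 * find_mergeable_anon_vma(); only if that fails is a fresh anon_vma
 * allocated.  mm->page_table_lock guards against a racing thread installing
 * vma->anon_vma at the same time, in which case the freshly allocated
 * structure is released again.
 */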
int anon_vma_prepare(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	might_sleep();
	if (unlikely(!anon_vma)) {
		struct mm_struct *mm = vma->vm_mm;
		struct anon_vma *allocated;

		anon_vma = find_mergeable_anon_vma(vma);
		allocated = NULL;
		if (!anon_vma) {
			anon_vma = anon_vma_alloc();
			if (unlikely(!anon_vma))
				return -ENOMEM;
			allocated = anon_vma;
		}
		spin_lock(&anon_vma->lock);

		/* page_table_lock to protect against concurrent threads */
		spin_lock(&mm->page_table_lock);
		if (likely(!vma->anon_vma)) {
			vma->anon_vma = anon_vma;
			list_add_tail(&vma->anon_vma_node, &anon_vma->head);
			allocated = NULL;
		}
		spin_unlock(&mm->page_table_lock);

		spin_unlock(&anon_vma->lock);
		if (unlikely(allocated))
			anon_vma_free(allocated);
	}
	return 0;
}

void __anon_vma_merge(struct vm_area_struct *vma, struct vm_area_struct *next)
{
	BUG_ON(vma->anon_vma != next->anon_vma);
	list_del(&next->anon_vma_node);
}

void __anon_vma_link(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	if (anon_vma)
		list_add_tail(&vma->anon_vma_node, &anon_vma->head);
}

void anon_vma_link(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	if (anon_vma) {
		spin_lock(&anon_vma->lock);
		list_add_tail(&vma->anon_vma_node, &anon_vma->head);
		spin_unlock(&anon_vma->lock);
	}
}

void anon_vma_unlink(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	int empty;

	if (!anon_vma)
		return;

	spin_lock(&anon_vma->lock);
	list_del(&vma->anon_vma_node);

	/* We must garbage collect the anon_vma if it's empty */
	empty = list_empty(&anon_vma->head);
	spin_unlock(&anon_vma->lock);

	if (empty)
		anon_vma_free(anon_vma);
}

static void anon_vma_ctor(void *data)
{
	struct anon_vma *anon_vma = data;

	spin_lock_init(&anon_vma->lock);
	INIT_LIST_HEAD(&anon_vma->head);
}

void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
}
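
/*
 * Getting a lock on a stable anon_vma from a page off the LRU is tricky:
 * the page is not guaranteed to stay mapped.  The anon_vma cache is created
 * with SLAB_DESTROY_BY_RCU, so holding rcu_read_lock() keeps the memory from
 * being reused for anything else while anon_vma->lock is taken; the
 * page_mapped() check tells us the anon_vma is still in use.
 */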
struct anon_vma *page_lock_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long) page->mapping;
	if (!(anon_mapping & PAGE_MAPPING_ANON))
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	spin_lock(&anon_vma->lock);
	return anon_vma;
out:
	rcu_read_unlock();
	return NULL;
}

void page_unlock_anon_vma(struct anon_vma *anon_vma)
{
	spin_unlock(&anon_vma->lock);
	rcu_read_unlock();
}
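
/*
 * At what user virtual address is page expected in @vma?  Returns -EFAULT
 * if the page's index falls outside the vma.
 */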
static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
		/* page should be within @vma mapping range */
		return -EFAULT;
	}
	return address;
}
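
/*
 * At what user virtual address is page expected in vma, checking that the
 * page also matches the vma (same anon_vma, or same file mapping)?
 */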
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
	if (PageAnon(page)) {
		if ((void *)vma->anon_vma !=
		    (void *)page->mapping - PAGE_MAPPING_ANON)
			return -EFAULT;
	} else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
		if (!vma->vm_file ||
		    vma->vm_file->f_mapping != page->mapping)
			return -EFAULT;
	} else
		return -EFAULT;
	return vma_address(page, vma);
}
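
/*
 * Check that @page is mapped at @address into @mm.
 *
 * If @sync is false, pte presence is checked racily before taking the page
 * table lock, which avoids the lock for ptes that are not present.
 *
 * On success returns the pte, mapped and locked; the caller releases it with
 * pte_unmap_unlock(pte, *ptlp).
 */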
pte_t *page_check_address(struct page *page, struct mm_struct *mm,
			  unsigned long address, spinlock_t **ptlp, int sync)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return NULL;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return NULL;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return NULL;

	pte = pte_offset_map(pmd, address);
	/* Make a quick check before getting the lock */
	if (!sync && !pte_present(*pte)) {
		pte_unmap(pte);
		return NULL;
	}

	ptl = pte_lockptr(mm, pmd);
	spin_lock(ptl);
	if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
		*ptlp = ptl;
		return pte;
	}
	pte_unmap_unlock(pte, ptl);
	return NULL;
}
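
/*
 * page_mapped_in_vma - check whether a page is really mapped in a VMA.
 * Returns 1 if a pte currently maps the page in this vma, 0 otherwise.
 * Used below when deciding whether an mlocked vma really holds the page.
 */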
static int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
	unsigned long address;
	pte_t *pte;
	spinlock_t *ptl;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		return 0;
	pte = page_check_address(page, vma->vm_mm, address, &ptl, 1);
	if (!pte)
		return 0;
	pte_unmap_unlock(pte, ptl);

	return 1;
}
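
/*
 * Subfunctions of page_referenced: page_referenced_one called repeatedly
 * from either page_referenced_anon or page_referenced_file.
 */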
static int page_referenced_one(struct page *page,
	struct vm_area_struct *vma, unsigned int *mapcount)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	spinlock_t *ptl;
	int referenced = 0;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl, 0);
	if (!pte)
		goto out;

	/*
	 * Don't report an mlocked page as referenced: we want it to progress
	 * to try_to_unmap so it can be moved to the unevictable list.
	 */
	if (vma->vm_flags & VM_LOCKED) {
		*mapcount = 1;	/* break early from loop */
		goto out_unmap;
	}

	if (ptep_clear_flush_young_notify(vma, address, pte))
		referenced++;

	/* Pretend the page is referenced if the task has the
	   swap token and is in the middle of a page fault. */
	if (mm != current->mm && has_swap_token(mm) &&
			rwsem_is_locked(&mm->mmap_sem))
		referenced++;

out_unmap:
	(*mapcount)--;
	pte_unmap_unlock(pte, ptl);
out:
	return referenced;
}

static int page_referenced_anon(struct page *page,
				struct mem_cgroup *mem_cont)
{
	unsigned int mapcount;
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	int referenced = 0;

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return referenced;

	mapcount = page_mapcount(page);
	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		/*
		 * If we are reclaiming on behalf of a cgroup, skip counting
		 * references from mms belonging to other cgroups.
		 */
		if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
			continue;
		referenced += page_referenced_one(page, vma, &mapcount);
		if (!mapcount)
			break;
	}

	page_unlock_anon_vma(anon_vma);
	return referenced;
}
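
/*
 * page_referenced_file - referenced check for object-based rmap
 *
 * For a file-backed page, find all the places it is mapped via the prio
 * tree and check/clear the referenced flag in each pte, counting how many
 * vmas referenced the page.  The page must be locked so that page->mapping
 * and the structures underneath it remain stable.
 */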
static int page_referenced_file(struct page *page,
				struct mem_cgroup *mem_cont)
{
	unsigned int mapcount;
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int referenced = 0;

	/*
	 * The caller's checks on page->mapping and !PageAnon have made sure
	 * that this is a file page: the check for page->mapping excludes the
	 * case just before it gets set on an anon page.
	 */
	BUG_ON(PageAnon(page));

	/*
	 * The page lock not only makes sure that page->mapping cannot
	 * suddenly be NULLified by truncation, it makes sure that the
	 * structure at mapping cannot be freed and reused yet, so we can
	 * safely take mapping->i_mmap_lock.
	 */
	BUG_ON(!PageLocked(page));

	spin_lock(&mapping->i_mmap_lock);

	/*
	 * i_mmap_lock does not stabilize mapcount at all, but mapcount is
	 * only used as a guide to how many vmas we need to visit.
	 */
	mapcount = page_mapcount(page);

	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		/*
		 * If we are reclaiming on behalf of a cgroup, skip counting
		 * references from mms belonging to other cgroups.
		 */
		if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
			continue;
		referenced += page_referenced_one(page, vma, &mapcount);
		if (!mapcount)
			break;
	}

	spin_unlock(&mapping->i_mmap_lock);
	return referenced;
}
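
/*
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 * @mem_cont: target memory controller, or NULL
 *
 * Quick test_and_clear_referenced for all mappings of a page; returns the
 * number of ptes which referenced the page.
 */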
int page_referenced(struct page *page, int is_locked,
			struct mem_cgroup *mem_cont)
{
	int referenced = 0;

	if (TestClearPageReferenced(page))
		referenced++;

	if (page_mapped(page) && page->mapping) {
		if (PageAnon(page))
			referenced += page_referenced_anon(page, mem_cont);
		else if (is_locked)
			referenced += page_referenced_file(page, mem_cont);
		else if (!trylock_page(page))
			referenced++;
		else {
			if (page->mapping)
				referenced +=
					page_referenced_file(page, mem_cont);
			unlock_page(page);
		}
	}

	if (page_test_and_clear_young(page))
		referenced++;

	return referenced;
}

static int page_mkclean_one(struct page *page, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	spinlock_t *ptl;
	int ret = 0;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl, 1);
	if (!pte)
		goto out;

	if (pte_dirty(*pte) || pte_write(*pte)) {
		pte_t entry;

		flush_cache_page(vma, address, pte_pfn(*pte));
		entry = ptep_clear_flush_notify(vma, address, pte);
		entry = pte_wrprotect(entry);
		entry = pte_mkclean(entry);
		set_pte_at(mm, address, pte, entry);
		ret = 1;
	}

	pte_unmap_unlock(pte, ptl);
out:
	return ret;
}

static int page_mkclean_file(struct address_space *mapping, struct page *page)
{
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int ret = 0;

	BUG_ON(PageAnon(page));

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		if (vma->vm_flags & VM_SHARED)
			ret += page_mkclean_one(page, vma);
	}
	spin_unlock(&mapping->i_mmap_lock);
	return ret;
}

/*
 * page_mkclean - clean and write-protect all ptes mapping @page.
 * Returns non-zero if any mapping was dirty or writable.
 */
int page_mkclean(struct page *page)
{
	int ret = 0;

	BUG_ON(!PageLocked(page));

	if (page_mapped(page)) {
		struct address_space *mapping = page_mapping(page);
		if (mapping) {
			ret = page_mkclean_file(mapping, page);
			if (page_test_dirty(page)) {
				page_clear_dirty(page);
				ret = 1;
			}
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(page_mkclean);
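
/*
 * __page_set_anon_rmap - set up the anon rmap fields of a new anonymous page
 *
 * The caller holds the pte lock.  page->mapping is pointed at the anon_vma
 * with the PAGE_MAPPING_ANON bit set, and page->index becomes the linear
 * offset of @address within @vma.
 */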
static void __page_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);
	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;

	page->index = linear_page_index(vma, address);

	/*
	 * The anon page counter can be updated without disabling interrupts
	 * because it is never modified from interrupt context.
	 */
	__inc_zone_page_state(page, NR_ANON_PAGES);
}
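
/*
 * __page_check_anon_rmap - sanity check the anon rmap fields of a page that
 * is already mapped: used when another pte of an existing anon page is
 * added, to verify that mapping and index are consistent.
 */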
static void __page_check_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
#ifdef CONFIG_DEBUG_VM
	/*
	 * The page's anon-rmap details (mapping and index) are guaranteed to
	 * be set up correctly at this point.
	 *
	 * We have exclusion against page_add_anon_rmap because the caller
	 * always holds the page locked, except when called via page_dup_rmap,
	 * in which case the page is already known to be set up.
	 *
	 * We have exclusion against page_add_new_anon_rmap because those
	 * pages are initially only visible via the pagetables, and the pte is
	 * locked over the call to page_add_new_anon_rmap.
	 */
	struct anon_vma *anon_vma = vma->anon_vma;
	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	BUG_ON(page->mapping != (struct address_space *)anon_vma);
	BUG_ON(page->index != linear_page_index(vma, address));
#endif
}
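
/*
 * page_add_anon_rmap - add pte mapping to an anonymous page
 *
 * The caller needs to hold the pte lock and the page must be locked.  The
 * first mapping sets up the anon rmap fields; later mappings only check
 * that they are consistent (under CONFIG_DEBUG_VM).
 */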
void page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	if (atomic_inc_and_test(&page->_mapcount))
		__page_set_anon_rmap(page, vma, address);
	else
		__page_check_anon_rmap(page, vma, address);
}
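
/*
 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
 *
 * Same as page_add_anon_rmap but must only be called on *new* pages, so the
 * inc-and-test can be bypassed: the mapcount goes straight from -1 to 0.
 * The page does not need to be locked.
 */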
void page_add_new_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */
	__page_set_anon_rmap(page, vma, address);
}
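
/*
 * page_add_file_rmap - add pte mapping to a file page
 *
 * The caller needs to hold the pte lock.
 */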
void page_add_file_rmap(struct page *page)
{
	if (atomic_inc_and_test(&page->_mapcount))
		__inc_zone_page_state(page, NR_FILE_MAPPED);
}

#ifdef CONFIG_DEBUG_VM
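/*
 * page_dup_rmap - duplicate a pte mapping of a page
 *
 * Minimal variant of page_add_anon_rmap/page_add_file_rmap for the fork
 * path: the mapcount is simply incremented, with the anon fields checked
 * for consistency when CONFIG_DEBUG_VM is set.
 */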
void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
{
	BUG_ON(page_mapcount(page) == 0);
	if (PageAnon(page))
		__page_check_anon_rmap(page, vma, address);
	atomic_inc(&page->_mapcount);
}
#endif
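
/*
 * page_remove_rmap - take down pte mapping from a page
 *
 * The caller needs to hold the pte lock.  When the last mapping goes away,
 * transfer any architecture-maintained dirty bit (page_test_dirty) to the
 * struct page, uncharge the memory cgroup for anon pages, and update the
 * zone counters.
 */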
void page_remove_rmap(struct page *page, struct vm_area_struct *vma)
{
	if (atomic_add_negative(-1, &page->_mapcount)) {
		if (unlikely(page_mapcount(page) < 0)) {
			printk(KERN_EMERG "Eeek! page_mapcount(page) went negative! (%d)\n", page_mapcount(page));
			printk(KERN_EMERG " page pfn = %lx\n", page_to_pfn(page));
			printk(KERN_EMERG " page->flags = %lx\n", page->flags);
			printk(KERN_EMERG " page->count = %x\n", page_count(page));
			printk(KERN_EMERG " page->mapping = %p\n", page->mapping);
			print_symbol(KERN_EMERG " vma->vm_ops = %s\n", (unsigned long)vma->vm_ops);
			if (vma->vm_ops) {
				print_symbol(KERN_EMERG " vma->vm_ops->fault = %s\n", (unsigned long)vma->vm_ops->fault);
			}
			if (vma->vm_file && vma->vm_file->f_op)
				print_symbol(KERN_EMERG " vma->vm_file->f_op->mmap = %s\n", (unsigned long)vma->vm_file->f_op->mmap);
			BUG();
		}

		/*
		 * Now that the last pte has gone, transfer any dirty bit kept
		 * by the architecture to the struct page.  This can usually
		 * be skipped for an anon page about to be freed, but not if
		 * it is in swapcache: another pte slot may still hold the
		 * swap entry while the page has not yet been written out.
		 */
		if ((!PageAnon(page) || PageSwapCache(page)) &&
		    page_test_dirty(page)) {
			page_clear_dirty(page);
			set_page_dirty(page);
		}
		if (PageAnon(page))
			mem_cgroup_uncharge_page(page);
		__dec_zone_page_state(page,
			PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED);
		/*
		 * It would be tidy to reset the PageAnon mapping here, but
		 * that might overwrite a racing page_add_anon_rmap which
		 * increments mapcount after us but sets mapping before us:
		 * so leave the reset to the page-freeing path, and remember
		 * that page->mapping is only reliable while the page is
		 * mapped.
		 */
	}
}
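
/*
 * Subfunctions of try_to_unmap: try_to_unmap_one called repeatedly from
 * either try_to_unmap_anon or try_to_unmap_file.
 */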
static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
				int migration)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	int ret = SWAP_AGAIN;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl, 0);
	if (!pte)
		goto out;

	/*
	 * If the page is mlock()d, we cannot swap it out.
	 * If it's recently referenced (perhaps page_referenced
	 * skipped over this mm) then we should reactivate it.
	 */
	if (!migration) {
		if (vma->vm_flags & VM_LOCKED) {
			ret = SWAP_MLOCK;
			goto out_unmap;
		}
		if (ptep_clear_flush_young_notify(vma, address, pte)) {
			ret = SWAP_FAIL;
			goto out_unmap;
		}
	}

	/* Nuke the page table entry. */
	flush_cache_page(vma, address, page_to_pfn(page));
	pteval = ptep_clear_flush_notify(vma, address, pte);

	/* Move the dirty bit to the physical page now the pte is gone. */
	if (pte_dirty(pteval))
		set_page_dirty(page);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	if (PageAnon(page)) {
		swp_entry_t entry = { .val = page_private(page) };

		if (PageSwapCache(page)) {
			/*
			 * Store the swap location in the pte, and make sure
			 * this mm is on the mmlist so swapoff can find it.
			 */
			swap_duplicate(entry);
			if (list_empty(&mm->mmlist)) {
				spin_lock(&mmlist_lock);
				if (list_empty(&mm->mmlist))
					list_add(&mm->mmlist, &init_mm.mmlist);
				spin_unlock(&mmlist_lock);
			}
			dec_mm_counter(mm, anon_rss);
#ifdef CONFIG_MIGRATION
		} else {
			/*
			 * Store the pfn of the page in a special migration
			 * pte.  do_swap_page() will wait until the migration
			 * pte is removed and then restart fault handling.
			 */
			BUG_ON(!migration);
			entry = make_migration_entry(page, pte_write(pteval));
#endif
		}
		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
		BUG_ON(pte_file(*pte));
	} else
#ifdef CONFIG_MIGRATION
	if (migration) {
		/* Establish migration entry for a file page */
		swp_entry_t entry;
		entry = make_migration_entry(page, pte_write(pteval));
		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
	} else
#endif
		dec_mm_counter(mm, file_rss);

	page_remove_rmap(page, vma);
	page_cache_release(page);

out_unmap:
	pte_unmap_unlock(pte, ptl);
out:
	return ret;
}
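
/*
 * Object-based rmap cannot locate a page inside a nonlinear vma, because
 * the offset-into-file no longer corresponds to the offset into the virtual
 * address range.  Instead of searching every pte of every nonlinear vma for
 * one page, try_to_unmap_cluster sweeps a cluster of pages around a per-vma
 * cursor, unmapping whatever it finds, so that repeated calls gradually age
 * and unmap the nonlinear mappings.  CLUSTER_SIZE bounds the work done under
 * the pte lock on each visit.
 */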
#define CLUSTER_SIZE	min(32*PAGE_SIZE, PMD_SIZE)
#define CLUSTER_MASK	(~(CLUSTER_SIZE - 1))

static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
		struct vm_area_struct *vma, struct page *check_page)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	struct page *page;
	unsigned long address;
	unsigned long end;
	int ret = SWAP_AGAIN;
	int locked_vma = 0;

	address = (vma->vm_start + cursor) & CLUSTER_MASK;
	end = address + CLUSTER_SIZE;
	if (address < vma->vm_start)
		address = vma->vm_start;
	if (end > vma->vm_end)
		end = vma->vm_end;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return ret;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return ret;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return ret;

	/*
	 * If we can acquire the mmap_sem for read, and the vma is VM_LOCKED,
	 * keep the sem while scanning the cluster so the pages can be mlocked.
	 */
	if (MLOCK_PAGES && down_read_trylock(&vma->vm_mm->mmap_sem)) {
		locked_vma = (vma->vm_flags & VM_LOCKED);
		if (!locked_vma)
			up_read(&vma->vm_mm->mmap_sem);
	}

	pte = pte_offset_map_lock(mm, pmd, address, &ptl);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	for (; address < end; pte++, address += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, address, *pte);
		BUG_ON(!page || PageAnon(page));

		if (locked_vma) {
			mlock_vma_page(page);	/* no-op if already mlocked */
			if (page == check_page)
				ret = SWAP_MLOCK;
			continue;	/* don't unmap */
		}

		if (ptep_clear_flush_young_notify(vma, address, pte))
			continue;

		/* Nuke the page table entry. */
		flush_cache_page(vma, address, pte_pfn(*pte));
		pteval = ptep_clear_flush_notify(vma, address, pte);

		/* If nonlinear, store the file page offset in the pte. */
		if (page->index != linear_page_index(vma, address))
			set_pte_at(mm, address, pte, pgoff_to_pte(page->index));

		/* Move the dirty bit to the physical page now the pte is gone. */
		if (pte_dirty(pteval))
			set_page_dirty(page);

		page_remove_rmap(page, vma);
		page_cache_release(page);
		dec_mm_counter(mm, file_rss);
		(*mapcount)--;
	}
	pte_unmap_unlock(pte - 1, ptl);
	if (locked_vma)
		up_read(&vma->vm_mm->mmap_sem);
	return ret;
}
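
/*
 * try_to_mlock_page - called when an unmap attempt found a VM_LOCKED vma.
 * If mmap_sem can be taken for read and the vma is still VM_LOCKED, mlock
 * the page and report success; otherwise leave it for a later pass.
 */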
static int try_to_mlock_page(struct page *page, struct vm_area_struct *vma)
{
	int mlocked = 0;

	if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
		if (vma->vm_flags & VM_LOCKED) {
			mlock_vma_page(page);
			mlocked++;
		}
		up_read(&vma->vm_mm->mmap_sem);
	}
	return mlocked;
}
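
/*
 * try_to_unmap_anon - unmap or unlock anonymous page using anon_vma method
 *
 * Walks the vmas on the page's anon_vma list.  With @unlock set
 * (try_to_munlock mode) it only looks for VM_LOCKED vmas and attempts to
 * mlock the page; otherwise it calls try_to_unmap_one on each vma until the
 * page is no longer mapped.  Returns SWAP_SUCCESS, SWAP_AGAIN, SWAP_FAIL or
 * SWAP_MLOCK.
 */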
static int try_to_unmap_anon(struct page *page, int unlock, int migration)
{
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	unsigned int mlocked = 0;
	int ret = SWAP_AGAIN;

	if (MLOCK_PAGES && unlikely(unlock))
		ret = SWAP_SUCCESS;	/* default for try_to_munlock() */

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return ret;

	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		if (MLOCK_PAGES && unlikely(unlock)) {
			if (!((vma->vm_flags & VM_LOCKED) &&
			      page_mapped_in_vma(page, vma)))
				continue;	/* must visit all vmas */
			ret = SWAP_MLOCK;
		} else {
			ret = try_to_unmap_one(page, vma, migration);
			if (ret == SWAP_FAIL || !page_mapped(page))
				break;
		}
		if (ret == SWAP_MLOCK) {
			mlocked = try_to_mlock_page(page, vma);
			if (mlocked)
				break;	/* stop if actually mlocked page */
		}
	}

	page_unlock_anon_vma(anon_vma);

	if (mlocked)
		ret = SWAP_MLOCK;	/* actually mlocked the page */
	else if (ret == SWAP_MLOCK)
		ret = SWAP_AGAIN;	/* saw VM_LOCKED vma, couldn't lock it */

	return ret;
}
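
/*
 * try_to_unmap_file - unmap/unlock file page using the object-based rmap
 *
 * Finds all the mappings of @page via the prio tree on page->mapping, and
 * unmaps (or, with @unlock, mlocks) the page.  Nonlinear vmas are handled
 * separately: they are scanned in clusters around a per-vma cursor, since
 * the page's offset gives no hint of where it is mapped.  Called with the
 * page locked; takes mapping->i_mmap_lock internally.
 */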
static int try_to_unmap_file(struct page *page, int unlock, int migration)
{
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int ret = SWAP_AGAIN;
	unsigned long cursor;
	unsigned long max_nl_cursor = 0;
	unsigned long max_nl_size = 0;
	unsigned int mapcount;
	unsigned int mlocked = 0;

	if (MLOCK_PAGES && unlikely(unlock))
		ret = SWAP_SUCCESS;	/* default for try_to_munlock() */

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		if (MLOCK_PAGES && unlikely(unlock)) {
			if (!(vma->vm_flags & VM_LOCKED))
				continue;	/* must visit all vmas */
			ret = SWAP_MLOCK;
		} else {
			ret = try_to_unmap_one(page, vma, migration);
			if (ret == SWAP_FAIL || !page_mapped(page))
				goto out;
		}
		if (ret == SWAP_MLOCK) {
			mlocked = try_to_mlock_page(page, vma);
			if (mlocked)
				break;	/* stop if actually mlocked page */
		}
	}

	if (mlocked)
		goto out;

	if (list_empty(&mapping->i_mmap_nonlinear))
		goto out;

	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
		if (MLOCK_PAGES && unlikely(unlock)) {
			if (!(vma->vm_flags & VM_LOCKED))
				continue;	/* must visit all vmas */
			ret = SWAP_MLOCK;	/* leave mlocked == 0 */
			goto out;		/* no need to look further */
		}
		if (!MLOCK_PAGES && !migration && (vma->vm_flags & VM_LOCKED))
			continue;
		cursor = (unsigned long) vma->vm_private_data;
		if (cursor > max_nl_cursor)
			max_nl_cursor = cursor;
		cursor = vma->vm_end - vma->vm_start;
		if (cursor > max_nl_size)
			max_nl_size = cursor;
	}

	if (max_nl_size == 0) {	/* all nonlinears locked or reserved? */
		ret = SWAP_FAIL;
		goto out;
	}

	/*
	 * We don't try to search for this page in the nonlinear vmas, and
	 * page_referenced wouldn't have found it anyway.  Instead just walk
	 * the nonlinear vmas trying to age and unmap some.  The mapcount of
	 * the page we came in with is used only as a guide to how hard to try.
	 */
	mapcount = page_mapcount(page);
	if (!mapcount)
		goto out;
	cond_resched_lock(&mapping->i_mmap_lock);

	max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
	if (max_nl_cursor == 0)
		max_nl_cursor = CLUSTER_SIZE;

	do {
		list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
			if (!MLOCK_PAGES && !migration &&
			    (vma->vm_flags & VM_LOCKED))
				continue;
			cursor = (unsigned long) vma->vm_private_data;
			while (cursor < max_nl_cursor &&
				cursor < vma->vm_end - vma->vm_start) {
				ret = try_to_unmap_cluster(cursor, &mapcount,
								vma, page);
				if (ret == SWAP_MLOCK)
					mlocked = 2;	/* to return below */
				cursor += CLUSTER_SIZE;
				vma->vm_private_data = (void *) cursor;
				if ((int)mapcount <= 0)
					goto out;
			}
			vma->vm_private_data = (void *) max_nl_cursor;
		}
		cond_resched_lock(&mapping->i_mmap_lock);
		max_nl_cursor += CLUSTER_SIZE;
	} while (max_nl_cursor <= max_nl_size);

	/*
	 * Don't loop forever (perhaps all the remaining pages are in locked
	 * vmas).  Reset the cursor on all nonlinear vmas, now forgetting on
	 * which ones it had fallen behind.
	 */
	list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
		vma->vm_private_data = NULL;
out:
	spin_unlock(&mapping->i_mmap_lock);
	if (mlocked)
		ret = SWAP_MLOCK;	/* actually mlocked the page */
	else if (ret == SWAP_MLOCK)
		ret = SWAP_AGAIN;	/* saw VM_LOCKED vma, couldn't lock it */
	return ret;
}
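
/*
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 * @migration: migration flag
 *
 * Tries to remove all the page table entries which are mapping this page,
 * used in the pageout path.  Caller must hold the page lock.
 * Return values are:
 *
 * SWAP_SUCCESS	- we succeeded in removing all mappings
 * SWAP_AGAIN	- we missed a mapping, try again later
 * SWAP_FAIL	- the page is unswappable
 * SWAP_MLOCK	- the page is mlocked
 */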
int try_to_unmap(struct page *page, int migration)
{
	int ret;

	BUG_ON(!PageLocked(page));

	if (PageAnon(page))
		ret = try_to_unmap_anon(page, 0, migration);
	else
		ret = try_to_unmap_file(page, 0, migration);
	if (ret != SWAP_MLOCK && !page_mapped(page))
		ret = SWAP_SUCCESS;
	return ret;
}

#ifdef CONFIG_UNEVICTABLE_LRU
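/*
 * try_to_munlock - try to munlock a page
 * @page: the page to be munlocked
 *
 * Called from munlock code.  Checks all of the vmas mapping the page to make
 * sure nobody else has this page mlocked.  The page will be returned with
 * PG_mlocked cleared if no other vmas have it mlocked.
 *
 * Return values are:
 *
 * SWAP_SUCCESS	- no vmas holding the page mlocked
 * SWAP_AGAIN	- page mapped in mlocked vma, but mmap_sem couldn't be taken
 * SWAP_MLOCK	- page is now mlocked
 */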
int try_to_munlock(struct page *page)
{
	VM_BUG_ON(!PageLocked(page) || PageLRU(page));

	if (PageAnon(page))
		return try_to_unmap_anon(page, 1, 0);
	else
		return try_to_unmap_file(page, 1, 0);
}
#endif