/*
 * Memory migration functionality.
 *
 * Page migration: allocate a new page, copy the contents and state of an
 * old page into it, and update every reference (page tables, page cache,
 * swap cache) to point at the new page.
 */
#include <linux/migrate.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/pagevec.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/memcontrol.h>
#include <linux/syscalls.h>

#include "internal.h"

#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
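
/*
 * migrate_prep() is called before building up a list of pages to migrate
 * with isolate_lru_page().  Draining the per-CPU LRU pagevecs makes sure
 * that pages sitting in them can actually be isolated.
 */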
int migrate_prep(void)
{
        /*
         * Clear the LRU lists so pages can be isolated.
         * Note that pages may be moved off the LRU after we have
         * drained them. Those pages will fail to migrate like other
         * pages that may be busy.
         */
        lru_add_drain_all();

        return 0;
}
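
/*
 * Put previously isolated pages back onto the appropriate LRU lists
 * and drop the list's reference on them.  Returns the number of pages
 * put back.
 */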
int putback_lru_pages(struct list_head *l)
{
        struct page *page;
        struct page *page2;
        int count = 0;

        list_for_each_entry_safe(page, page2, l, lru) {
                list_del(&page->lru);
                putback_lru_page(page);
                count++;
        }
        return count;
}
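
/*
 * Restore a potential migration pte to a working pte entry: look up the
 * pte for @old in @vma and, if it still holds a migration entry pointing
 * at @old, replace it with a present pte mapping @new.
 */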
static void remove_migration_pte(struct vm_area_struct *vma,
                struct page *old, struct page *new)
{
        struct mm_struct *mm = vma->vm_mm;
        swp_entry_t entry;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *ptep, pte;
        spinlock_t *ptl;
        unsigned long addr = page_address_in_vma(new, vma);

        if (addr == -EFAULT)
                return;

        pgd = pgd_offset(mm, addr);
        if (!pgd_present(*pgd))
                return;

        pud = pud_offset(pgd, addr);
        if (!pud_present(*pud))
                return;

        pmd = pmd_offset(pud, addr);
        if (!pmd_present(*pmd))
                return;

        ptep = pte_offset_map(pmd, addr);

        if (!is_swap_pte(*ptep)) {
                pte_unmap(ptep);
                return;
        }

        ptl = pte_lockptr(mm, pmd);
        spin_lock(ptl);
        pte = *ptep;
        if (!is_swap_pte(pte))
                goto out;

        entry = pte_to_swp_entry(pte);

        if (!is_migration_entry(entry) || migration_entry_to_page(entry) != old)
                goto out;

        /*
         * The return value of this GFP_ATOMIC charge is deliberately
         * ignored: at this point the migration pte must be replaced no
         * matter what, so a charge failure cannot abort the operation.
         */
        mem_cgroup_charge(new, mm, GFP_ATOMIC);

        get_page(new);
        pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
        if (is_write_migration_entry(entry))
                pte = pte_mkwrite(pte);
        flush_cache_page(vma, addr, pte_pfn(pte));
        set_pte_at(mm, addr, ptep, pte);

        if (PageAnon(new))
                page_add_anon_rmap(new, vma, addr);
        else
                page_add_file_rmap(new);

        /* No need to invalidate - it was non-present before */
        update_mmu_cache(vma, addr, pte);

out:
        pte_unmap_unlock(ptep, ptl);
}
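
/*
 * Get rid of all migration entries that point to @old in a file-backed
 * mapping and replace them with ptes referencing @new.
 */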
static void remove_file_migration_ptes(struct page *old, struct page *new)
{
        struct vm_area_struct *vma;
        struct address_space *mapping = page_mapping(new);
        struct prio_tree_iter iter;
        pgoff_t pgoff = new->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

        if (!mapping)
                return;

        spin_lock(&mapping->i_mmap_lock);

        vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff)
                remove_migration_pte(vma, old, new);

        spin_unlock(&mapping->i_mmap_lock);
}
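
/*
 * Same as above, but for anonymous pages: walk every vma on the page's
 * anon_vma list and fix up any migration entries pointing to @old.
 */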
static void remove_anon_migration_ptes(struct page *old, struct page *new)
{
        struct anon_vma *anon_vma;
        struct vm_area_struct *vma;
        unsigned long mapping;

        mapping = (unsigned long)new->mapping;

        if (!mapping || (mapping & PAGE_MAPPING_ANON) == 0)
                return;

        /*
         * For anonymous pages, page->mapping carries the anon_vma pointer
         * with the PAGE_MAPPING_ANON bit set; strip the bit to recover it.
         */
        anon_vma = (struct anon_vma *) (mapping - PAGE_MAPPING_ANON);
        spin_lock(&anon_vma->lock);

        list_for_each_entry(vma, &anon_vma->head, anon_vma_node)
                remove_migration_pte(vma, old, new);

        spin_unlock(&anon_vma->lock);
}
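
/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */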
static void remove_migration_ptes(struct page *old, struct page *new)
{
        if (PageAnon(new))
                remove_anon_migration_ptes(old, new);
        else
                remove_file_migration_ptes(old, new);
}
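
/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 */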
void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
                                unsigned long address)
{
        pte_t *ptep, pte;
        spinlock_t *ptl;
        swp_entry_t entry;
        struct page *page;

        ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
        pte = *ptep;
        if (!is_swap_pte(pte))
                goto out;

        entry = pte_to_swp_entry(pte);
        if (!is_migration_entry(entry))
                goto out;

        page = migration_entry_to_page(entry);

        /*
         * Once the radix-tree replacement has started, the page count may
         * already be zero, and wait_on_page_locked() must not be called on
         * a page we do not hold a reference to.  Use get_page_unless_zero();
         * if it fails, the fault will simply be retried.
         */
        if (!get_page_unless_zero(page))
                goto out;
        pte_unmap_unlock(ptep, ptl);
        wait_on_page_locked(page);
        put_page(page);
        return;
out:
        pte_unmap_unlock(ptep, ptl);
}
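
/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 * 1 for anonymous pages without a mapping
 * 2 for pages with a mapping
 * 3 for pages with a mapping and PagePrivate set.
 */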
static int migrate_page_move_mapping(struct address_space *mapping,
                struct page *newpage, struct page *page)
{
        int expected_count;
        void **pslot;

        if (!mapping) {
                /* Anonymous page without mapping */
                if (page_count(page) != 1)
                        return -EAGAIN;
                return 0;
        }

        spin_lock_irq(&mapping->tree_lock);

        pslot = radix_tree_lookup_slot(&mapping->page_tree,
                                        page_index(page));

        expected_count = 2 + !!PagePrivate(page);
        if (page_count(page) != expected_count ||
                        (struct page *)radix_tree_deref_slot(pslot) != page) {
                spin_unlock_irq(&mapping->tree_lock);
                return -EAGAIN;
        }

        if (!page_freeze_refs(page, expected_count)) {
                spin_unlock_irq(&mapping->tree_lock);
                return -EAGAIN;
        }

        /*
         * Now we know that no one else is looking at the page.
         */
        get_page(newpage);      /* add cache reference */
#ifdef CONFIG_SWAP
        if (PageSwapCache(page)) {
                SetPageSwapCache(newpage);
                set_page_private(newpage, page_private(page));
        }
#endif

        radix_tree_replace_slot(pslot, newpage);

        page_unfreeze_refs(page, expected_count);
        /*
         * Drop cache reference from old page.
         * We know this isn't the last reference.
         */
        __put_page(page);

        /*
         * If moved to a different zone then also account
         * the page for that zone. Other VM counters will be
         * taken care of when we establish references to the
         * new page and drop references to the old page.
         *
         * Note that anonymous pages are accounted for
         * via NR_FILE_PAGES and NR_ANON_PAGES if they
         * are mapped to swap space.
         */
        __dec_zone_page_state(page, NR_FILE_PAGES);
        __inc_zone_page_state(newpage, NR_FILE_PAGES);

        spin_unlock_irq(&mapping->tree_lock);

        return 0;
}
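
/*
 * Copy the page contents and as much page state as possible
 * (flags, dirtiness, mlock and swapcache information) to the new page.
 */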
static void migrate_page_copy(struct page *newpage, struct page *page)
{
        int anon;

        copy_highpage(newpage, page);

        if (PageError(page))
                SetPageError(newpage);
        if (PageReferenced(page))
                SetPageReferenced(newpage);
        if (PageUptodate(page))
                SetPageUptodate(newpage);
        if (TestClearPageActive(page)) {
                VM_BUG_ON(PageUnevictable(page));
                SetPageActive(newpage);
        } else
                unevictable_migrate_page(newpage, page);
        if (PageChecked(page))
                SetPageChecked(newpage);
        if (PageMappedToDisk(page))
                SetPageMappedToDisk(newpage);

        if (PageDirty(page)) {
                clear_page_dirty_for_io(page);
                /*
                 * Want to mark the page and the radix tree as dirty, and
                 * redo the accounting that clear_page_dirty_for_io undid,
                 * but we can't use set_page_dirty because that function
                 * is actually a signal that all of the page has become dirty.
                 * Whereas only part of our page may be dirty.
                 */
                __set_page_dirty_nobuffers(newpage);
        }

        mlock_migrate_page(newpage, page);

#ifdef CONFIG_SWAP
        ClearPageSwapCache(page);
#endif
        ClearPagePrivate(page);
        set_page_private(page, 0);
        /* page->mapping contains a flag for PageAnon() */
        anon = PageAnon(page);
        page->mapping = NULL;

        if (!anon) /* This page was removed from radix-tree. */
                mem_cgroup_uncharge_cache_page(page);

        /*
         * If any waiters have accumulated on the new page then
         * wake them up.
         */
        if (PageWriteback(newpage))
                end_page_writeback(newpage);
}
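
/* Always fail migration. Used for mappings that are not movable */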
int fail_migrate_page(struct address_space *mapping,
                        struct page *newpage, struct page *page)
{
        return -EIO;
}
EXPORT_SYMBOL(fail_migrate_page);
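
/*
 * Common logic to directly migrate a single page suitable for
 * pages that do not use PagePrivate.
 *
 * Pages are locked upon entry and exit.
 */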
int migrate_page(struct address_space *mapping,
                struct page *newpage, struct page *page)
{
        int rc;

        BUG_ON(PageWriteback(page));    /* Writeback must be complete */

        rc = migrate_page_move_mapping(mapping, newpage, page);

        if (rc)
                return rc;

        migrate_page_copy(newpage, page);
        return 0;
}
EXPORT_SYMBOL(migrate_page);
#ifdef CONFIG_BLOCK
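
/*
 * Migration function for pages with buffers. This function can only be used
 * if the underlying filesystem guarantees that no other references to "page"
 * exist.
 */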
int buffer_migrate_page(struct address_space *mapping,
                struct page *newpage, struct page *page)
{
        struct buffer_head *bh, *head;
        int rc;

        if (!page_has_buffers(page))
                return migrate_page(mapping, newpage, page);

        head = page_buffers(page);

        rc = migrate_page_move_mapping(mapping, newpage, page);

        if (rc)
                return rc;

        bh = head;
        do {
                get_bh(bh);
                lock_buffer(bh);
                bh = bh->b_this_page;

        } while (bh != head);

        ClearPagePrivate(page);
        set_page_private(newpage, page_private(page));
        set_page_private(page, 0);
        put_page(page);
        get_page(newpage);

        bh = head;
        do {
                set_bh_page(bh, newpage, bh_offset(bh));
                bh = bh->b_this_page;

        } while (bh != head);

        SetPagePrivate(newpage);

        migrate_page_copy(newpage, page);

        bh = head;
        do {
                unlock_buffer(bh);
                put_bh(bh);
                bh = bh->b_this_page;

        } while (bh != head);

        return 0;
}
EXPORT_SYMBOL(buffer_migrate_page);
#endif
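
/*
 * Writeback a page to clean the dirty state
 */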
static int writeout(struct address_space *mapping, struct page *page)
{
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_NONE,
                .nr_to_write = 1,
                .range_start = 0,
                .range_end = LLONG_MAX,
                .nonblocking = 1,
                .for_reclaim = 1
        };
        int rc;

        if (!mapping->a_ops->writepage)
                /* No write method for the address space */
                return -EINVAL;

        if (!clear_page_dirty_for_io(page))
                /* Someone else already triggered a write */
                return -EAGAIN;

        /*
         * A dirty page may imply that the underlying filesystem has
         * the page on some queue. So the page must be clean for
         * migration. Writeout may mean we lose the lock and the
         * page state is no longer what we checked for earlier.
         * At this point we know that the migration attempt cannot
         * be successful.
         */
        remove_migration_ptes(page, page);

        rc = mapping->a_ops->writepage(page, &wbc);

        if (rc != AOP_WRITEPAGE_ACTIVATE)
                /* unlocked. Relock */
                lock_page(page);

        return (rc < 0) ? -EIO : -EAGAIN;
}
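
/*
 * Default handling if a filesystem does not provide a migration function.
 */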
static int fallback_migrate_page(struct address_space *mapping,
        struct page *newpage, struct page *page)
{
        if (PageDirty(page))
                return writeout(mapping, page);

        /*
         * Buffers may be managed in a filesystem specific way.
         * We must have no buffers or drop them.
         */
        if (PagePrivate(page) &&
            !try_to_release_page(page, GFP_KERNEL))
                return -EAGAIN;

        return migrate_page(mapping, newpage, page);
}
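
/*
 * Move a page to a newly allocated page.
 * The page is locked and all ptes have been successfully removed.
 *
 * The new page will have replaced the old page if this function
 * is successful.
 *
 * Return value:
 *   < 0 - error code
 *  == 0 - success
 */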
static int move_to_new_page(struct page *newpage, struct page *page)
{
        struct address_space *mapping;
        int rc;

        /*
         * Block others from accessing the page when we get around to
         * establishing additional references. We are the only one
         * holding a reference to the new page at this point.
         */
        if (!trylock_page(newpage))
                BUG();

        /* Prepare mapping for the new page.*/
        newpage->index = page->index;
        newpage->mapping = page->mapping;
        if (PageSwapBacked(page))
                SetPageSwapBacked(newpage);

        mapping = page_mapping(page);
        if (!mapping)
                rc = migrate_page(mapping, newpage, page);
        else if (mapping->a_ops->migratepage)
                /*
                 * Most pages have a mapping and most filesystems
                 * should provide a migration function. Anonymous
                 * pages are part of swap space which also has its
                 * own migration function. This is the most common
                 * path for page migration.
                 */
                rc = mapping->a_ops->migratepage(mapping,
                                                newpage, page);
        else
                rc = fallback_migrate_page(mapping, newpage, page);

        if (!rc) {
                remove_migration_ptes(page, newpage);
        } else
                newpage->mapping = NULL;

        unlock_page(newpage);

        return rc;
}
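
/*
 * Obtain the lock on page, remove all ptes and migrate the page
 * to the newly allocated page in newpage.
 */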
static int unmap_and_move(new_page_t get_new_page, unsigned long private,
                        struct page *page, int force)
{
        int rc = 0;
        int *result = NULL;
        struct page *newpage = get_new_page(page, private, &result);
        int rcu_locked = 0;
        int charge = 0;

        if (!newpage)
                return -ENOMEM;

        if (page_count(page) == 1) {
                /* page was freed from under us. So we are done. */
                goto move_newpage;
        }

        charge = mem_cgroup_prepare_migration(page, newpage);
        if (charge == -ENOMEM) {
                rc = -ENOMEM;
                goto move_newpage;
        }
        /* prepare_migration can only return 0 or -ENOMEM */
        BUG_ON(charge);

        rc = -EAGAIN;
        if (!trylock_page(page)) {
                if (!force)
                        goto move_newpage;
                lock_page(page);
        }

        if (PageWriteback(page)) {
                if (!force)
                        goto unlock;
                wait_on_page_writeback(page);
        }
        /*
         * For anonymous pages, hold the RCU read lock so that the anon_vma
         * cannot be freed while the page is unmapped and its ptes carry
         * migration entries.  File-backed pages are protected by the page
         * lock and the mapping, so they do not need this.
         */
        if (PageAnon(page)) {
                rcu_read_lock();
                rcu_locked = 1;
        }

        /*
         * A page with no ->mapping is either a freshly added swapcache page
         * that has no rmap yet, or an orphaned page carrying fs-private
         * metadata.  Neither can be migrated: calling try_to_unmap() on it
         * would be wrong, so drop any private buffers and give up.
         */
        if (!page->mapping) {
                if (!PageAnon(page) && PagePrivate(page)) {
                        /*
                         * Call try_to_free_buffers() directly: it is what
                         * try_to_release_page() would do anyway, and we may
                         * be under rcu_read_lock() here, so GFP_KERNEL
                         * cannot be used.
                         */
                        try_to_free_buffers(page);
                }
                goto rcu_unlock;
        }

        /* Establish migration ptes or remove ptes */
        try_to_unmap(page, 1);

        if (!page_mapped(page))
                rc = move_to_new_page(newpage, page);

        if (rc)
                remove_migration_ptes(page, page);
rcu_unlock:
        if (rcu_locked)
                rcu_read_unlock();

unlock:
        unlock_page(page);

        if (rc != -EAGAIN) {
                /*
                 * A page that has been migrated has all references
                 * removed and will be freed. A page that has not
                 * been migrated will have kept its references and
                 * be restored.
                 */
                list_del(&page->lru);
                putback_lru_page(page);
        }

move_newpage:
        if (!charge)
                mem_cgroup_end_migration(newpage);

        /*
         * Move the new page to the LRU. If migration was not successful
         * then this will free the page.
         */
        putback_lru_page(newpage);

        if (result) {
                if (rc)
                        *result = rc;
                else
                        *result = page_to_nid(newpage);
        }
        return rc;
}
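
/*
 * migrate_pages - migrate the pages on @from, using @get_new_page to
 * allocate a target page for each of them.
 *
 * Gives up after 10 passes or when no retryable pages remain.  All pages
 * are put back on the LRU (or freed) before returning.
 *
 * Return: number of pages not migrated, or an error code.
 */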
int migrate_pages(struct list_head *from,
                new_page_t get_new_page, unsigned long private)
{
        int retry = 1;
        int nr_failed = 0;
        int pass = 0;
        struct page *page;
        struct page *page2;
        int swapwrite = current->flags & PF_SWAPWRITE;
        int rc;

        if (!swapwrite)
                current->flags |= PF_SWAPWRITE;

        for(pass = 0; pass < 10 && retry; pass++) {
                retry = 0;

                list_for_each_entry_safe(page, page2, from, lru) {
                        cond_resched();

                        rc = unmap_and_move(get_new_page, private,
                                                page, pass > 2);

                        switch(rc) {
                        case -ENOMEM:
                                goto out;
                        case -EAGAIN:
                                retry++;
                                break;
                        case 0:
                                break;
                        default:
                                /* Permanent failure */
                                nr_failed++;
                                break;
                        }
                }
        }
        rc = 0;
out:
        if (!swapwrite)
                current->flags &= ~PF_SWAPWRITE;

        putback_lru_pages(from);

        if (rc)
                return rc;

        return nr_failed + retry;
}

#ifdef CONFIG_NUMA
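
/*
 * Move a list of individual pages: one array entry per page, holding the
 * page's address, its target node and the resulting status.
 */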
struct page_to_node {
        unsigned long addr;
        struct page *page;
        int node;
        int status;
};
static struct page *new_page_node(struct page *p, unsigned long private,
                int **result)
{
        struct page_to_node *pm = (struct page_to_node *)private;

        while (pm->node != MAX_NUMNODES && pm->page != p)
                pm++;

        if (pm->node == MAX_NUMNODES)
                return NULL;

        *result = &pm->status;

        return alloc_pages_node(pm->node,
                                GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0);
}
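
/*
 * Move a set of pages to the specified node.
 * Returns the number of pages that were not migrated or an error code.
 */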
static int do_move_page_to_node_array(struct mm_struct *mm,
                                      struct page_to_node *pm,
                                      int migrate_all)
{
        int err;
        struct page_to_node *pp;
        LIST_HEAD(pagelist);

        migrate_prep();
        down_read(&mm->mmap_sem);

        /*
         * Build a list of pages to migrate
         */
        for (pp = pm; pp->node != MAX_NUMNODES; pp++) {
                struct vm_area_struct *vma;
                struct page *page;

                /*
                 * A valid page pointer that will never match a page that
                 * is actually being migrated.
                 */
                pp->page = ZERO_PAGE(0);

                err = -EFAULT;
                vma = find_vma(mm, pp->addr);
                if (!vma || !vma_migratable(vma))
                        goto set_status;

                page = follow_page(vma, pp->addr, FOLL_GET);

                err = PTR_ERR(page);
                if (IS_ERR(page))
                        goto set_status;

                err = -ENOENT;
                if (!page)
                        goto set_status;

                if (PageReserved(page))         /* Check for zero page */
                        goto put_and_set;

                pp->page = page;
                err = page_to_nid(page);

                if (err == pp->node)
                        /*
                         * Node already in the right place
                         */
                        goto put_and_set;

                err = -EACCES;
                if (page_mapcount(page) > 1 &&
                                !migrate_all)
                        goto put_and_set;

                err = isolate_lru_page(page);
                if (!err)
                        list_add_tail(&page->lru, &pagelist);
put_and_set:
                /*
                 * Either remove the duplicate refcount from
                 * isolate_lru_page() or drop the page ref if it was
                 * not isolated.
                 */
                put_page(page);
set_status:
                pp->status = err;
        }

        err = 0;
        if (!list_empty(&pagelist))
                err = migrate_pages(&pagelist, new_page_node,
                                (unsigned long)pm);

        up_read(&mm->mmap_sem);
        return err;
}
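
/*
 * Migrate an array of page address onto an array of nodes and fill
 * the corresponding array of status.
 */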
static int do_pages_move(struct mm_struct *mm, struct task_struct *task,
                         unsigned long nr_pages,
                         const void __user * __user *pages,
                         const int __user *nodes,
                         int __user *status, int flags)
{
        struct page_to_node *pm = NULL;
        nodemask_t task_nodes;
        int err = 0;
        int i;

        task_nodes = cpuset_mems_allowed(task);

        /* Limit nr_pages so the pm array fits into a vmalloc allocation */
        if (nr_pages >= ULONG_MAX / sizeof(struct page_to_node) - 1) {
                err = -E2BIG;
                goto out;
        }

        pm = vmalloc((nr_pages + 1) * sizeof(struct page_to_node));
        if (!pm) {
                err = -ENOMEM;
                goto out;
        }

        /*
         * Get parameters from user space and initialize the pm
         * array. Return various errors if the user did something wrong.
         */
        for (i = 0; i < nr_pages; i++) {
                const void __user *p;

                err = -EFAULT;
                if (get_user(p, pages + i))
                        goto out_pm;

                pm[i].addr = (unsigned long)p;
                if (nodes) {
                        int node;

                        if (get_user(node, nodes + i))
                                goto out_pm;

                        err = -ENODEV;
                        if (!node_state(node, N_HIGH_MEMORY))
                                goto out_pm;

                        err = -EACCES;
                        if (!node_isset(node, task_nodes))
                                goto out_pm;

                        pm[i].node = node;
                } else
                        pm[i].node = 0; /* anything to not match MAX_NUMNODES */
        }
        /* End marker */
        pm[nr_pages].node = MAX_NUMNODES;

        err = do_move_page_to_node_array(mm, pm, flags & MPOL_MF_MOVE_ALL);
        if (err >= 0)
                /* Return status information */
                for (i = 0; i < nr_pages; i++)
                        if (put_user(pm[i].status, status + i))
                                err = -EFAULT;

out_pm:
        vfree(pm);
out:
        return err;
}
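
/*
 * Determine the nodes of an array of pages and store it in an array of status.
 */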
static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
                                const void __user **pages, int *status)
{
        unsigned long i;

        down_read(&mm->mmap_sem);

        for (i = 0; i < nr_pages; i++) {
                unsigned long addr = (unsigned long)(*pages);
                struct vm_area_struct *vma;
                struct page *page;
                int err = -EFAULT;

                vma = find_vma(mm, addr);
                if (!vma)
                        goto set_status;

                page = follow_page(vma, addr, 0);

                err = PTR_ERR(page);
                if (IS_ERR(page))
                        goto set_status;

                err = -ENOENT;
                /* Use PageReserved to check for zero page */
                if (!page || PageReserved(page))
                        goto set_status;

                err = page_to_nid(page);
set_status:
                *status = err;

                pages++;
                status++;
        }

        up_read(&mm->mmap_sem);
}
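
/*
 * Determine the nodes of a user array of pages and store it in
 * a user array of status.
 */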
static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
                         const void __user * __user *pages,
                         int __user *status)
{
#define DO_PAGES_STAT_CHUNK_NR 16
        const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
        int chunk_status[DO_PAGES_STAT_CHUNK_NR];
        unsigned long i, chunk_nr = DO_PAGES_STAT_CHUNK_NR;
        int err;

        for (i = 0; i < nr_pages; i += chunk_nr) {
                if (chunk_nr + i > nr_pages)
                        chunk_nr = nr_pages - i;

                err = copy_from_user(chunk_pages, &pages[i],
                                     chunk_nr * sizeof(*chunk_pages));
                if (err) {
                        err = -EFAULT;
                        goto out;
                }

                do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);

                err = copy_to_user(&status[i], chunk_status,
                                   chunk_nr * sizeof(*chunk_status));
                if (err) {
                        err = -EFAULT;
                        goto out;
                }
        }
        err = 0;

out:
        return err;
}
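
/*
 * Move a list of pages in the address space of the currently executing
 * process.
 */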
asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages,
                        const void __user * __user *pages,
                        const int __user *nodes,
                        int __user *status, int flags)
{
        struct task_struct *task;
        struct mm_struct *mm;
        int err;

        /* Check flags */
        if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
                return -EINVAL;

        if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
                return -EPERM;

        /* Find the mm_struct */
        read_lock(&tasklist_lock);
        task = pid ? find_task_by_vpid(pid) : current;
        if (!task) {
                read_unlock(&tasklist_lock);
                return -ESRCH;
        }
        mm = get_task_mm(task);
        read_unlock(&tasklist_lock);

        if (!mm)
                return -EINVAL;

        /*
         * Check if this process has the right to modify the specified
         * process. The right exists if the process has administrative
         * capabilities, superuser privileges or the same
         * userid as the target process.
         */
        if ((current->euid != task->suid) && (current->euid != task->uid) &&
            (current->uid != task->suid) && (current->uid != task->uid) &&
            !capable(CAP_SYS_NICE)) {
                err = -EPERM;
                goto out;
        }

        err = security_task_movememory(task);
        if (err)
                goto out;

        if (nodes) {
                err = do_pages_move(mm, task, nr_pages, pages, nodes, status,
                                    flags);
        } else {
                err = do_pages_stat(mm, nr_pages, pages, status);
        }

out:
        mmput(mm);
        return err;
}
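
/*
 * Give each vma that provides a vm_ops->migrate callback the chance to
 * migrate its pages itself.  Stops and returns the error of the first
 * callback that fails.
 */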
int migrate_vmas(struct mm_struct *mm, const nodemask_t *to,
        const nodemask_t *from, unsigned long flags)
{
        struct vm_area_struct *vma;
        int err = 0;

        for(vma = mm->mmap; vma->vm_next && !err; vma = vma->vm_next) {
                if (vma->vm_ops && vma->vm_ops->migrate) {
                        err = vma->vm_ops->migrate(vma, to, from, flags);
                        if (err)
                                break;
                }
        }
        return err;
}
#endif