930 goto out;
931 }
932 error = 1;
933
/*
 * NOTE(review): this is an interior fragment — the enclosing function
 * (presumably shmem_unuse_inode(), mm/shmem.c) begins before and ends
 * after this chunk; the leading "9xx" tokens on each line look like
 * listing-number artifacts from extraction, not code. Confirm against
 * the pristine source before relying on this snippet.
 */
/*
 * Re-check the swap entry under info->lock: the entry may have been
 * changed by a racing task before we took the lock.  Only if ptr still
 * holds the expected swap entry value do we try to install the page in
 * the inode's page cache (GFP_NOWAIT: no blocking allocation here —
 * we hold info->lock).
 */
934 spin_lock(&info->lock);
935 ptr = shmem_swp_entry(info, idx, NULL);
936 if (ptr && ptr->val == entry.val) {
937 error = add_to_page_cache_locked(page, inode->i_mapping,
938 idx, GFP_NOWAIT);
939
/* Lost the race: undo the memcg charge taken for this cache page. */
940 } else
941 mem_cgroup_uncharge_cache_page(page);
942
/*
 * -EEXIST means some other page already occupies this index in the
 * page cache.  If that page is fully uptodate we can treat the swap
 * slot as redundant (error = 0 -> fall into the success path below);
 * otherwise keep error = 1 and leave things alone.
 */
943 if (error == -EEXIST) {
944 struct page *filepage = find_get_page(inode->i_mapping, idx);
945 error = 1;
946 if (filepage) {
947
948
949
950
951 if (PageUptodate(filepage))
952 error = 0;
953 page_cache_release(filepage);
954 }
955 }
/*
 * Success: the page now lives in the page cache, so detach it from
 * the swap cache, mark it dirty (its contents exist nowhere else),
 * clear the inode's swap entry, and release the swap slot.
 * SHMEM_PAGEIN flags that this inode has had pages brought in —
 * presumably consulted elsewhere; not visible in this fragment.
 * error = 1 here signals "entry consumed" to the caller — TODO
 * confirm against the caller's return-value convention.
 */
956 if (!error) {
957 delete_from_swap_cache(page);
958 set_page_dirty(page);
959 info->flags |= SHMEM_PAGEIN;
960 shmem_swp_set(info, ptr, 0);
961 swap_free(entry);
962 error = 1;
963 }
/* Drop the swp-entry kmap (if we got one) before releasing the lock. */
964 if (ptr)
965 shmem_swp_unmap(ptr);
966 spin_unlock(&info->lock);
967 radix_tree_preload_end();
968out:
969 unlock_page(page);
970 page_cache_release(page);