User: | Jiri Slaby |
Error type: | Leaving function in locked state |
Error type description: | Some lock is not unlocked on all paths of a function, so it is leaked |
File location: | mm/mremap.c |
Line in file: | 127 |
Project: | Linux Kernel |
Project version: | 2.6.28 |
Tools: |
Stanse
(version 1.2)
Verdict: Undetermined — 1 occurrence |
Entered: | 2012-03-02 21:35:17 UTC |
97 } 98 99 /* 100 * We don't have to worry about the ordering of src and dst 101 * pte locks because exclusive mmap_sem prevents deadlock. 102 */ 103 old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl); 104 new_pte = pte_offset_map_nested(new_pmd, new_addr); 105 new_ptl = pte_lockptr(mm, new_pmd); 106 if (new_ptl != old_ptl) 107 spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING); 108 arch_enter_lazy_mmu_mode(); 109 110 for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE, 111 new_pte++, new_addr += PAGE_SIZE) { 112 if (pte_none(*old_pte)) 113 continue; 114 pte = ptep_clear_flush(vma, old_addr, old_pte); 115 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr); 116 set_pte_at(mm, new_addr, new_pte, pte); 117 } 118 119 arch_leave_lazy_mmu_mode(); 120 if (new_ptl != old_ptl) 121 spin_unlock(new_ptl); 122 pte_unmap_nested(new_pte - 1); 123 pte_unmap_unlock(old_pte - 1, old_ptl); 124 if (mapping) 125 spin_unlock(&mapping->i_mmap_lock); 126 mmu_notifier_invalidate_range_end(vma->vm_mm, old_start, old_end); 127} 128 129#define LATENCY_LIMIT (64 * PAGE_SIZE) 130 131unsigned long move_page_tables(struct vm_area_struct *vma, 132 unsigned long old_addr, struct vm_area_struct *new_vma, 133 unsigned long new_addr, unsigned long len) 134{ 135 unsigned long extent, next, old_end; 136 pmd_t *old_pmd, *new_pmd; 137