User: | Jiri Slaby |
Error type: | Double Lock |
Error type description: | Some lock is locked twice unintentionally in a sequence |
File location: | fs/ext4/mballoc.c |
Line in file: | 4189 |
Project: | Linux Kernel |
Project version: | 2.6.28 |
Tools: | Stanse (1.2); 1 undetermined |
Entered: | 2012-02-27 21:22:42 UTC |
4159 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); 4160 } 4161 if (ac) 4162 kmem_cache_free(ext4_ac_cachep, ac); 4163} 4164 4165/* 4166 * We have incremented pa_count. So it cannot be freed at this 4167 * point. Also we hold lg_mutex. So no parallel allocation is 4168 * possible from this lg. That means pa_free cannot be updated. 4169 * 4170 * A parallel ext4_mb_discard_group_preallocations is possible. 4171 * which can cause the lg_prealloc_list to be updated. 4172 */ 4173 4174static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac) 4175{ 4176 int order, added = 0, lg_prealloc_count = 1; 4177 struct super_block *sb = ac->ac_sb; 4178 struct ext4_locality_group *lg = ac->ac_lg; 4179 struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa; 4180 4181 order = fls(pa->pa_free) - 1; 4182 if (order > PREALLOC_TB_SIZE - 1) 4183 /* The max size of hash table is PREALLOC_TB_SIZE */ 4184 order = PREALLOC_TB_SIZE - 1; 4185 /* Add the prealloc space to lg */ 4186 rcu_read_lock(); 4187 list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order], 4188 pa_inode_list) { 4189 spin_lock(&tmp_pa->pa_lock); 4190 if (tmp_pa->pa_deleted) { 4191 spin_unlock(&pa->pa_lock); 4192 continue; 4193 } 4194 if (!added && pa->pa_free < tmp_pa->pa_free) { 4195 /* Add to the tail of the previous entry */ 4196 list_add_tail_rcu(&pa->pa_inode_list, 4197 &tmp_pa->pa_inode_list); 4198 added = 1; 4199 /*