User: | Jiri Slaby |
Error type: | Double Lock |
Error type description: | A lock is acquired a second time while already held, with no intervening unlock |
File location: | kernel/sched.c |
Line in file: | 945 |
Project: | Linux Kernel |
Project version: | 2.6.28 |
Tools: | Stanse (1.2) |
Entered: | 2012-04-29 14:49:11 UTC |
	spin_unlock(&rq->lock);
#endif
}

/*
 * Second half of a context switch's lock handling: runs on the new task
 * after the switch has been made.
 */
static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
	/*
	 * After ->oncpu is cleared, the task can be moved to a different CPU.
	 * We must ensure this doesn't happen until the switch is completely
	 * finished.
	 */
	smp_wmb();
	prev->oncpu = 0;
#endif
#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	/* Interrupts were kept disabled across the switch; re-enable now. */
	local_irq_enable();
#endif
}
#endif /* __ARCH_WANT_UNLOCKED_CTXSW */

/*
 * __task_rq_lock - lock the runqueue a given task resides on.
 * Must be called interrupts disabled.
 *
 * Returns with the runqueue's lock held (see the __acquires annotation);
 * the caller is responsible for the matching unlock.
 *
 * NOTE(review): static analysis (Stanse 1.2) reports a "double lock" on
 * the spin_lock() below.  This is a false positive: on every loop
 * iteration that does not return, spin_unlock() runs before control
 * reaches spin_lock() again, so the lock is never acquired twice while
 * held.  The lock->lock path the tool sees spans two iterations and
 * ignores the unlock on the retry edge.
 */
static inline struct rq *__task_rq_lock(struct task_struct *p)
	__acquires(rq->lock)
{
	for (;;) {
		/*
		 * Snapshot the task's runqueue, then take its lock.  The
		 * task may migrate between the lookup and the lock, so
		 * re-check under the lock and retry if it moved.
		 */
		struct rq *rq = task_rq(p);
		spin_lock(&rq->lock);
		if (likely(rq == task_rq(p)))
			return rq;	/* still on this rq: return locked */
		spin_unlock(&rq->lock);	/* task migrated: drop and retry */
	}
}

/*
 * task_rq_lock - lock the runqueue a given task resides on and disable
 * interrupts. Note the ordering: we can safely lookup the task_rq without
 * explicitly disabling preemption.