3513
3514
/*
 * NOTE(review): this is a fragment of load_balance() from the Linux
 * kernel scheduler (kernel/sched.c); the leading 35xx tokens are the
 * source file's original line numbers embedded in the text.  The
 * function entry, local declarations, and the redo/out_balanced/
 * out_one_pinned labels referenced below live outside this fragment.
 */
/*
 * Attempt to pull tasks from the busiest runqueue onto this_rq.
 * Both runqueue locks are taken together via double_rq_lock() with
 * interrupts disabled for the whole move, and released in the
 * reverse order immediately afterwards.
 */
3515 local_irq_save(flags);
3516 double_rq_lock(this_rq, busiest);
3517 ld_moved = move_tasks(this_rq, this_cpu, busiest,
3518 imbalance, sd, idle, &all_pinned);
3519 double_rq_unlock(this_rq, busiest);
3520 local_irq_restore(flags);
3521
3522
3523
3524
/*
 * Tasks were moved into a runqueue that is not the one we are
 * currently executing on (we balanced on behalf of another CPU):
 * kick that CPU so it reschedules and notices its new load.
 */
3525 if (ld_moved && this_cpu != smp_processor_id())
3526 resched_cpu(this_cpu);
3527
3528
/*
 * move_tasks() found every candidate task pinned by CPU affinity:
 * drop the busiest CPU from the candidate mask and retry the balance
 * against the remaining CPUs, or bail out entirely once the mask is
 * empty (redo/out_balanced are defined outside this fragment).
 */
3529 if (unlikely(all_pinned)) {
3530 cpu_clear(cpu_of(busiest), *cpus);
3531 if (!cpus_empty(*cpus))
3532 goto redo;
3533 goto out_balanced;
3534 }
3535 }
3536
/* Nothing was moved: account the failure against this sched domain. */
3537 if (!ld_moved) {
3538 schedstat_inc(sd, lb_failed[idle]);
3539 sd->nr_balance_failed++;
3540
/*
 * Too many consecutive failures (past cache_nice_tries+2, i.e. we
 * have given cache-hot tasks a chance to stay put): escalate toward
 * active balancing.  Take busiest's own rq lock, with IRQs off, to
 * examine its currently running task.
 */
3541 if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
3542
3543 spin_lock_irqsave(&busiest->lock, flags);
3544
3545
3546
3547
/*
 * The task currently running on busiest is not allowed on this_cpu
 * (cpus_allowed excludes it), so active migration cannot help either:
 * drop the lock, mark everything pinned, and take the one-pinned exit
 * path (out_one_pinned is defined outside this fragment).
 */
3548 if (!cpu_isset(this_cpu, busiest->curr->cpus_allowed)) {
3549 spin_unlock_irqrestore(&busiest->lock, flags);
3550 all_pinned = 1;
3551 goto out_one_pinned;
3552 }
3553