		struct cfs_rq *busiest_cfs_rq = tg->cfs_rq[busiest_cpu];
		unsigned long busiest_h_load = busiest_cfs_rq->h_load;
		unsigned long busiest_weight = busiest_cfs_rq->load.weight;
		u64 rem_load, moved_load;

		/* Skip groups that have no tasks queued on the busiest CPU. */
		if (!busiest_cfs_rq->task_weight)
			continue;

		/*
		 * Scale the remaining (hierarchical) load target down into
		 * this group's local queue units; the +1 guards against a
		 * zero divisor.
		 */
		rem_load = (u64)rem_load_move * busiest_weight;
		rem_load = div_u64(rem_load, busiest_h_load + 1);

		/* Pull up to rem_load worth of tasks from this group. */
		moved_load = __load_balance_fair(this_rq, this_cpu, busiest,
				rem_load, sd, idle, all_pinned, this_best_prio,
				tg->cfs_rq[busiest_cpu]);

		if (!moved_load)
			continue;

		/*
		 * Scale the load that was actually moved back up into
		 * hierarchical units and charge it against the remaining
		 * target; overshooting drives the remainder negative.
		 */
		moved_load *= busiest_h_load;
		moved_load = div_u64(moved_load, busiest_weight + 1);

		rem_load_move -= moved_load;
		if (rem_load_move < 0)
			break;
	}
	rcu_read_unlock();

	return max_load_move - rem_load_move;
}
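/*
 * Worked example of the two rescalings above, with made-up numbers:
 * say rem_load_move = 2048 (hierarchical units), and the group's
 * queue on the busiest CPU has load.weight = 1024 and h_load = 512.
 * Down-scaling the target gives
 *
 *	rem_load = 2048 * 1024 / (512 + 1) = 4088 local units.
 *
 * If __load_balance_fair() then manages to move 2044 local units,
 * scaling back up charges
 *
 *	moved_load = 2044 * 512 / (1024 + 1) = 1021 hierarchical units,
 *
 * so rem_load_move drops from 2048 to 1027 before the next group is
 * examined.
 */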
#else
static unsigned long
load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
		  unsigned long max_load_move,
		  struct sched_domain *sd, enum cpu_idle_type idle,
		  int *all_pinned, int *this_best_prio)
{
	return __load_balance_fair(this_rq, this_cpu, busiest,
			max_load_move, sd, idle, all_pinned,