Showing error 1598

User: Jiri Slaby
Error type: Leaving function in locked state
Error type description: A lock is acquired but not released on every path out of a function, so the function can return with the lock still held and the lock is leaked (see the illustrative sketch after this header)
File location: kernel/sched_fair.c
Line in file: 1555
Project: Linux Kernel
Project version: 2.6.28
Tools: Stanse (1.2)
Entered: 2012-05-29 20:11:37 UTC
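
As an illustration of this error class, here is a minimal sketch of a function that
returns on an error path without dropping a lock it took. The function, lock and
parameter names are hypothetical; the code is not taken from the kernel sources
listed below:

    #include <linux/spinlock.h>
    #include <linux/errno.h>

    /* Illustrative only -- not from kernel/sched_fair.c. */
    static DEFINE_SPINLOCK(dev_lock);

    static int update_device_state(int value)
    {
            spin_lock(&dev_lock);

            if (value < 0)
                    return -EINVAL;         /* bug: returns with dev_lock still held */

            /* ... update state protected by dev_lock ... */

            spin_unlock(&dev_lock);
            return 0;
    }

The usual fix for this pattern is to release the lock on the error path as well, or
to branch to a common exit label that performs a single unlock, so that every return
is reached with the lock already dropped.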


Source:

   1/*
   2 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
   3 *
   4 *  Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
   5 *
   6 *  Interactivity improvements by Mike Galbraith
   7 *  (C) 2007 Mike Galbraith <efault@gmx.de>
   8 *
   9 *  Various enhancements by Dmitry Adamushko.
  10 *  (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
  11 *
  12 *  Group scheduling enhancements by Srivatsa Vaddagiri
  13 *  Copyright IBM Corporation, 2007
  14 *  Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
  15 *
  16 *  Scaled math optimizations by Thomas Gleixner
  17 *  Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
  18 *
  19 *  Adaptive scheduling granularity, math enhancements by Peter Zijlstra
  20 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
  21 */
  22
  23#include <linux/latencytop.h>
  24
  25/*
  26 * Targeted preemption latency for CPU-bound tasks:
  27 * (default: 20ms * (1 + ilog(ncpus)), units: nanoseconds)
  28 *
  29 * NOTE: this latency value is not the same as the concept of
  30 * 'timeslice length' - timeslices in CFS are of variable length
  31 * and have no persistent notion like in traditional, time-slice
  32 * based scheduling concepts.
  33 *
  34 * (to see the precise effective timeslice length of your workload,
  35 *  run vmstat and monitor the context-switches (cs) field)
  36 */
  37unsigned int sysctl_sched_latency = 20000000ULL;
  38
  39/*
  40 * Minimal preemption granularity for CPU-bound tasks:
  41 * (default: 4 msec * (1 + ilog(ncpus)), units: nanoseconds)
  42 */
  43unsigned int sysctl_sched_min_granularity = 4000000ULL;
  44
  45/*
  46 * is kept at sysctl_sched_latency / sysctl_sched_min_granularity
  47 */
  48static unsigned int sched_nr_latency = 5;
  49
  50/*
  51 * After fork, child runs first. (default) If set to 0 then
  52 * parent will (try to) run first.
  53 */
  54const_debug unsigned int sysctl_sched_child_runs_first = 1;
  55
  56/*
  57 * sys_sched_yield() compat mode
  58 *
  59 * This option switches the aggressive yield implementation of the
  60 * old scheduler back on.
  61 */
  62unsigned int __read_mostly sysctl_sched_compat_yield;
  63
  64/*
  65 * SCHED_OTHER wake-up granularity.
  66 * (default: 5 msec * (1 + ilog(ncpus)), units: nanoseconds)
  67 *
  68 * This option delays the preemption effects of decoupled workloads
  69 * and reduces their over-scheduling. Synchronous workloads will still
  70 * have immediate wakeup/sleep latencies.
  71 */
  72unsigned int sysctl_sched_wakeup_granularity = 5000000UL;
  73
  74const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
  75
  76static const struct sched_class fair_sched_class;
  77
  78/**************************************************************
  79 * CFS operations on generic schedulable entities:
  80 */
  81
  82static inline struct task_struct *task_of(struct sched_entity *se)
  83{
  84        return container_of(se, struct task_struct, se);
  85}
  86
  87#ifdef CONFIG_FAIR_GROUP_SCHED
  88
  89/* cpu runqueue to which this cfs_rq is attached */
  90static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
  91{
  92        return cfs_rq->rq;
  93}
  94
  95/* An entity is a task if it doesn't "own" a runqueue */
  96#define entity_is_task(se)        (!se->my_q)
  97
  98/* Walk up scheduling entities hierarchy */
  99#define for_each_sched_entity(se) \
 100                for (; se; se = se->parent)
 101
 102static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
 103{
 104        return p->se.cfs_rq;
 105}
 106
 107/* runqueue on which this entity is (to be) queued */
 108static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
 109{
 110        return se->cfs_rq;
 111}
 112
 113/* runqueue "owned" by this group */
 114static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
 115{
 116        return grp->my_q;
 117}
 118
 119/* Given a group's cfs_rq on one cpu, return its corresponding cfs_rq on
 120 * another cpu ('this_cpu')
 121 */
 122static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
 123{
 124        return cfs_rq->tg->cfs_rq[this_cpu];
 125}
 126
 127/* Iterate thr' all leaf cfs_rq's on a runqueue */
 128#define for_each_leaf_cfs_rq(rq, cfs_rq) \
 129        list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
 130
 131/* Do the two (enqueued) entities belong to the same group ? */
 132static inline int
 133is_same_group(struct sched_entity *se, struct sched_entity *pse)
 134{
 135        if (se->cfs_rq == pse->cfs_rq)
 136                return 1;
 137
 138        return 0;
 139}
 140
 141static inline struct sched_entity *parent_entity(struct sched_entity *se)
 142{
 143        return se->parent;
 144}
 145
 146/* return depth at which a sched entity is present in the hierarchy */
 147static inline int depth_se(struct sched_entity *se)
 148{
 149        int depth = 0;
 150
 151        for_each_sched_entity(se)
 152                depth++;
 153
 154        return depth;
 155}
 156
 157static void
 158find_matching_se(struct sched_entity **se, struct sched_entity **pse)
 159{
 160        int se_depth, pse_depth;
 161
 162        /*
 163         * preemption test can be made between sibling entities who are in the
 164         * same cfs_rq i.e who have a common parent. Walk up the hierarchy of
 165         * both tasks until we find their ancestors who are siblings of common
 166         * parent.
 167         */
 168
 169        /* First walk up until both entities are at same depth */
 170        se_depth = depth_se(*se);
 171        pse_depth = depth_se(*pse);
 172
 173        while (se_depth > pse_depth) {
 174                se_depth--;
 175                *se = parent_entity(*se);
 176        }
 177
 178        while (pse_depth > se_depth) {
 179                pse_depth--;
 180                *pse = parent_entity(*pse);
 181        }
 182
 183        while (!is_same_group(*se, *pse)) {
 184                *se = parent_entity(*se);
 185                *pse = parent_entity(*pse);
 186        }
 187}
 188
 189#else        /* CONFIG_FAIR_GROUP_SCHED */
 190
 191static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
 192{
 193        return container_of(cfs_rq, struct rq, cfs);
 194}
 195
 196#define entity_is_task(se)        1
 197
 198#define for_each_sched_entity(se) \
 199                for (; se; se = NULL)
 200
 201static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
 202{
 203        return &task_rq(p)->cfs;
 204}
 205
 206static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
 207{
 208        struct task_struct *p = task_of(se);
 209        struct rq *rq = task_rq(p);
 210
 211        return &rq->cfs;
 212}
 213
 214/* runqueue "owned" by this group */
 215static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
 216{
 217        return NULL;
 218}
 219
 220static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
 221{
 222        return &cpu_rq(this_cpu)->cfs;
 223}
 224
 225#define for_each_leaf_cfs_rq(rq, cfs_rq) \
 226                for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
 227
 228static inline int
 229is_same_group(struct sched_entity *se, struct sched_entity *pse)
 230{
 231        return 1;
 232}
 233
 234static inline struct sched_entity *parent_entity(struct sched_entity *se)
 235{
 236        return NULL;
 237}
 238
 239static inline void
 240find_matching_se(struct sched_entity **se, struct sched_entity **pse)
 241{
 242}
 243
 244#endif        /* CONFIG_FAIR_GROUP_SCHED */
 245
 246
 247/**************************************************************
 248 * Scheduling class tree data structure manipulation methods:
 249 */
 250
 251static inline u64 max_vruntime(u64 min_vruntime, u64 vruntime)
 252{
 253        s64 delta = (s64)(vruntime - min_vruntime);
 254        if (delta > 0)
 255                min_vruntime = vruntime;
 256
 257        return min_vruntime;
 258}
 259
 260static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
 261{
 262        s64 delta = (s64)(vruntime - min_vruntime);
 263        if (delta < 0)
 264                min_vruntime = vruntime;
 265
 266        return min_vruntime;
 267}
 268
 269static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
 270{
 271        return se->vruntime - cfs_rq->min_vruntime;
 272}
 273
 274static void update_min_vruntime(struct cfs_rq *cfs_rq)
 275{
 276        u64 vruntime = cfs_rq->min_vruntime;
 277
 278        if (cfs_rq->curr)
 279                vruntime = cfs_rq->curr->vruntime;
 280
 281        if (cfs_rq->rb_leftmost) {
 282                struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
 283                                                   struct sched_entity,
 284                                                   run_node);
 285
 286                if (vruntime == cfs_rq->min_vruntime)
 287                        vruntime = se->vruntime;
 288                else
 289                        vruntime = min_vruntime(vruntime, se->vruntime);
 290        }
 291
 292        cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
 293}
 294
 295/*
 296 * Enqueue an entity into the rb-tree:
 297 */
 298static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 299{
 300        struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
 301        struct rb_node *parent = NULL;
 302        struct sched_entity *entry;
 303        s64 key = entity_key(cfs_rq, se);
 304        int leftmost = 1;
 305
 306        /*
 307         * Find the right place in the rbtree:
 308         */
 309        while (*link) {
 310                parent = *link;
 311                entry = rb_entry(parent, struct sched_entity, run_node);
 312                /*
 313                 * We don't care about collisions. Nodes with
 314                 * the same key stay together.
 315                 */
 316                if (key < entity_key(cfs_rq, entry)) {
 317                        link = &parent->rb_left;
 318                } else {
 319                        link = &parent->rb_right;
 320                        leftmost = 0;
 321                }
 322        }
 323
 324        /*
 325         * Maintain a cache of leftmost tree entries (it is frequently
 326         * used):
 327         */
 328        if (leftmost)
 329                cfs_rq->rb_leftmost = &se->run_node;
 330
 331        rb_link_node(&se->run_node, parent, link);
 332        rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
 333}
 334
 335static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 336{
 337        if (cfs_rq->rb_leftmost == &se->run_node) {
 338                struct rb_node *next_node;
 339
 340                next_node = rb_next(&se->run_node);
 341                cfs_rq->rb_leftmost = next_node;
 342        }
 343
 344        rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
 345}
 346
 347static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
 348{
 349        struct rb_node *left = cfs_rq->rb_leftmost;
 350
 351        if (!left)
 352                return NULL;
 353
 354        return rb_entry(left, struct sched_entity, run_node);
 355}
 356
 357static struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
 358{
 359        struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);
 360
 361        if (!last)
 362                return NULL;
 363
 364        return rb_entry(last, struct sched_entity, run_node);
 365}
 366
 367/**************************************************************
 368 * Scheduling class statistics methods:
 369 */
 370
 371#ifdef CONFIG_SCHED_DEBUG
 372int sched_nr_latency_handler(struct ctl_table *table, int write,
 373                struct file *filp, void __user *buffer, size_t *lenp,
 374                loff_t *ppos)
 375{
 376        int ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
 377
 378        if (ret || !write)
 379                return ret;
 380
 381        sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
 382                                        sysctl_sched_min_granularity);
 383
 384        return 0;
 385}
 386#endif
 387
 388/*
 389 * delta *= P[w / rw]
 390 */
 391static inline unsigned long
 392calc_delta_weight(unsigned long delta, struct sched_entity *se)
 393{
 394        for_each_sched_entity(se) {
 395                delta = calc_delta_mine(delta,
 396                                se->load.weight, &cfs_rq_of(se)->load);
 397        }
 398
 399        return delta;
 400}
 401
 402/*
 403 * delta /= w
 404 */
 405static inline unsigned long
 406calc_delta_fair(unsigned long delta, struct sched_entity *se)
 407{
 408        if (unlikely(se->load.weight != NICE_0_LOAD))
 409                delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);
 410
 411        return delta;
 412}
 413
 414/*
 415 * The idea is to set a period in which each task runs once.
 416 *
 417 * When there are too many tasks (sysctl_sched_nr_latency) we have to stretch
 418 * this period because otherwise the slices get too small.
 419 *
 420 * p = (nr <= nl) ? l : l*nr/nl
 421 */
 422static u64 __sched_period(unsigned long nr_running)
 423{
 424        u64 period = sysctl_sched_latency;
 425        unsigned long nr_latency = sched_nr_latency;
 426
 427        if (unlikely(nr_running > nr_latency)) {
 428                period = sysctl_sched_min_granularity;
 429                period *= nr_running;
 430        }
 431
 432        return period;
 433}
 434
 435/*
 436 * We calculate the wall-time slice from the period by taking a part
 437 * proportional to the weight.
 438 *
 439 * s = p*P[w/rw]
 440 */
 441static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 442{
 443        unsigned long nr_running = cfs_rq->nr_running;
 444
 445        if (unlikely(!se->on_rq))
 446                nr_running++;
 447
 448        return calc_delta_weight(__sched_period(nr_running), se);
 449}
 450
 451/*
 452 * We calculate the vruntime slice of a to be inserted task
 453 *
 454 * vs = s/w
 455 */
 456static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 457{
 458        return calc_delta_fair(sched_slice(cfs_rq, se), se);
 459}
 460
 461/*
 462 * Update the current task's runtime statistics. Skip current tasks that
 463 * are not in our scheduling class.
 464 */
 465static inline void
 466__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
 467              unsigned long delta_exec)
 468{
 469        unsigned long delta_exec_weighted;
 470
 471        schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));
 472
 473        curr->sum_exec_runtime += delta_exec;
 474        schedstat_add(cfs_rq, exec_clock, delta_exec);
 475        delta_exec_weighted = calc_delta_fair(delta_exec, curr);
 476        curr->vruntime += delta_exec_weighted;
 477        update_min_vruntime(cfs_rq);
 478}
 479
 480static void update_curr(struct cfs_rq *cfs_rq)
 481{
 482        struct sched_entity *curr = cfs_rq->curr;
 483        u64 now = rq_of(cfs_rq)->clock;
 484        unsigned long delta_exec;
 485
 486        if (unlikely(!curr))
 487                return;
 488
 489        /*
 490         * Get the amount of time the current task was running
 491         * since the last time we changed load (this cannot
 492         * overflow on 32 bits):
 493         */
 494        delta_exec = (unsigned long)(now - curr->exec_start);
 495
 496        __update_curr(cfs_rq, curr, delta_exec);
 497        curr->exec_start = now;
 498
 499        if (entity_is_task(curr)) {
 500                struct task_struct *curtask = task_of(curr);
 501
 502                cpuacct_charge(curtask, delta_exec);
 503                account_group_exec_runtime(curtask, delta_exec);
 504        }
 505}
 506
 507static inline void
 508update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
 509{
 510        schedstat_set(se->wait_start, rq_of(cfs_rq)->clock);
 511}
 512
 513/*
 514 * Task is being enqueued - update stats:
 515 */
 516static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 517{
 518        /*
 519         * Are we enqueueing a waiting task? (for current tasks
 520         * a dequeue/enqueue event is a NOP)
 521         */
 522        if (se != cfs_rq->curr)
 523                update_stats_wait_start(cfs_rq, se);
 524}
 525
 526static void
 527update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
 528{
 529        schedstat_set(se->wait_max, max(se->wait_max,
 530                        rq_of(cfs_rq)->clock - se->wait_start));
 531        schedstat_set(se->wait_count, se->wait_count + 1);
 532        schedstat_set(se->wait_sum, se->wait_sum +
 533                        rq_of(cfs_rq)->clock - se->wait_start);
 534        schedstat_set(se->wait_start, 0);
 535}
 536
 537static inline void
 538update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 539{
 540        /*
 541         * Mark the end of the wait period if dequeueing a
 542         * waiting task:
 543         */
 544        if (se != cfs_rq->curr)
 545                update_stats_wait_end(cfs_rq, se);
 546}
 547
 548/*
 549 * We are picking a new current task - update its stats:
 550 */
 551static inline void
 552update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
 553{
 554        /*
 555         * We are starting a new run period:
 556         */
 557        se->exec_start = rq_of(cfs_rq)->clock;
 558}
 559
 560/**************************************************
 561 * Scheduling class queueing methods:
 562 */
 563
 564#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
 565static void
 566add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
 567{
 568        cfs_rq->task_weight += weight;
 569}
 570#else
 571static inline void
 572add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
 573{
 574}
 575#endif
 576
 577static void
 578account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 579{
 580        update_load_add(&cfs_rq->load, se->load.weight);
 581        if (!parent_entity(se))
 582                inc_cpu_load(rq_of(cfs_rq), se->load.weight);
 583        if (entity_is_task(se)) {
 584                add_cfs_task_weight(cfs_rq, se->load.weight);
 585                list_add(&se->group_node, &cfs_rq->tasks);
 586        }
 587        cfs_rq->nr_running++;
 588        se->on_rq = 1;
 589}
 590
 591static void
 592account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 593{
 594        update_load_sub(&cfs_rq->load, se->load.weight);
 595        if (!parent_entity(se))
 596                dec_cpu_load(rq_of(cfs_rq), se->load.weight);
 597        if (entity_is_task(se)) {
 598                add_cfs_task_weight(cfs_rq, -se->load.weight);
 599                list_del_init(&se->group_node);
 600        }
 601        cfs_rq->nr_running--;
 602        se->on_rq = 0;
 603}
 604
 605static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 606{
 607#ifdef CONFIG_SCHEDSTATS
 608        if (se->sleep_start) {
 609                u64 delta = rq_of(cfs_rq)->clock - se->sleep_start;
 610                struct task_struct *tsk = task_of(se);
 611
 612                if ((s64)delta < 0)
 613                        delta = 0;
 614
 615                if (unlikely(delta > se->sleep_max))
 616                        se->sleep_max = delta;
 617
 618                se->sleep_start = 0;
 619                se->sum_sleep_runtime += delta;
 620
 621                account_scheduler_latency(tsk, delta >> 10, 1);
 622        }
 623        if (se->block_start) {
 624                u64 delta = rq_of(cfs_rq)->clock - se->block_start;
 625                struct task_struct *tsk = task_of(se);
 626
 627                if ((s64)delta < 0)
 628                        delta = 0;
 629
 630                if (unlikely(delta > se->block_max))
 631                        se->block_max = delta;
 632
 633                se->block_start = 0;
 634                se->sum_sleep_runtime += delta;
 635
 636                /*
 637                 * Blocking time is in units of nanosecs, so shift by 20 to
 638                 * get a milliseconds-range estimation of the amount of
 639                 * time that the task spent sleeping:
 640                 */
 641                if (unlikely(prof_on == SLEEP_PROFILING)) {
 642
 643                        profile_hits(SLEEP_PROFILING, (void *)get_wchan(tsk),
 644                                     delta >> 20);
 645                }
 646                account_scheduler_latency(tsk, delta >> 10, 0);
 647        }
 648#endif
 649}
 650
 651static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
 652{
 653#ifdef CONFIG_SCHED_DEBUG
 654        s64 d = se->vruntime - cfs_rq->min_vruntime;
 655
 656        if (d < 0)
 657                d = -d;
 658
 659        if (d > 3*sysctl_sched_latency)
 660                schedstat_inc(cfs_rq, nr_spread_over);
 661#endif
 662}
 663
 664static void
 665place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 666{
 667        u64 vruntime = cfs_rq->min_vruntime;
 668
 669        /*
 670         * The 'current' period is already promised to the current tasks,
 671         * however the extra weight of the new task will slow them down a
 672         * little, place the new task so that it fits in the slot that
 673         * stays open at the end.
 674         */
 675        if (initial && sched_feat(START_DEBIT))
 676                vruntime += sched_vslice(cfs_rq, se);
 677
 678        if (!initial) {
 679                /* sleeps up to a single latency don't count. */
 680                if (sched_feat(NEW_FAIR_SLEEPERS)) {
 681                        unsigned long thresh = sysctl_sched_latency;
 682
 683                        /*
 684                         * convert the sleeper threshold into virtual time
 685                         */
 686                        if (sched_feat(NORMALIZED_SLEEPER))
 687                                thresh = calc_delta_fair(thresh, se);
 688
 689                        vruntime -= thresh;
 690                }
 691
 692                /* ensure we never gain time by being placed backwards. */
 693                vruntime = max_vruntime(se->vruntime, vruntime);
 694        }
 695
 696        se->vruntime = vruntime;
 697}
 698
 699static void
 700enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
 701{
 702        /*
 703         * Update run-time statistics of the 'current'.
 704         */
 705        update_curr(cfs_rq);
 706        account_entity_enqueue(cfs_rq, se);
 707
 708        if (wakeup) {
 709                place_entity(cfs_rq, se, 0);
 710                enqueue_sleeper(cfs_rq, se);
 711        }
 712
 713        update_stats_enqueue(cfs_rq, se);
 714        check_spread(cfs_rq, se);
 715        if (se != cfs_rq->curr)
 716                __enqueue_entity(cfs_rq, se);
 717}
 718
 719static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
 720{
 721        if (cfs_rq->last == se)
 722                cfs_rq->last = NULL;
 723
 724        if (cfs_rq->next == se)
 725                cfs_rq->next = NULL;
 726}
 727
 728static void
 729dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 730{
 731        /*
 732         * Update run-time statistics of the 'current'.
 733         */
 734        update_curr(cfs_rq);
 735
 736        update_stats_dequeue(cfs_rq, se);
 737        if (sleep) {
 738#ifdef CONFIG_SCHEDSTATS
 739                if (entity_is_task(se)) {
 740                        struct task_struct *tsk = task_of(se);
 741
 742                        if (tsk->state & TASK_INTERRUPTIBLE)
 743                                se->sleep_start = rq_of(cfs_rq)->clock;
 744                        if (tsk->state & TASK_UNINTERRUPTIBLE)
 745                                se->block_start = rq_of(cfs_rq)->clock;
 746                }
 747#endif
 748        }
 749
 750        clear_buddies(cfs_rq, se);
 751
 752        if (se != cfs_rq->curr)
 753                __dequeue_entity(cfs_rq, se);
 754        account_entity_dequeue(cfs_rq, se);
 755        update_min_vruntime(cfs_rq);
 756}
 757
 758/*
 759 * Preempt the current task with a newly woken task if needed:
 760 */
 761static void
 762check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 763{
 764        unsigned long ideal_runtime, delta_exec;
 765
 766        ideal_runtime = sched_slice(cfs_rq, curr);
 767        delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
 768        if (delta_exec > ideal_runtime)
 769                resched_task(rq_of(cfs_rq)->curr);
 770}
 771
 772static void
 773set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 774{
 775        /* 'current' is not kept within the tree. */
 776        if (se->on_rq) {
 777                /*
 778                 * Any task has to be enqueued before it gets to execute on
 779                 * a CPU. So account for the time it spent waiting on the
 780                 * runqueue.
 781                 */
 782                update_stats_wait_end(cfs_rq, se);
 783                __dequeue_entity(cfs_rq, se);
 784        }
 785
 786        update_stats_curr_start(cfs_rq, se);
 787        cfs_rq->curr = se;
 788#ifdef CONFIG_SCHEDSTATS
 789        /*
 790         * Track our maximum slice length, if the CPU's load is at
 791         * least twice that of our own weight (i.e. don't track it
 792         * when there are only lesser-weight tasks around):
 793         */
 794        if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
 795                se->slice_max = max(se->slice_max,
 796                        se->sum_exec_runtime - se->prev_sum_exec_runtime);
 797        }
 798#endif
 799        se->prev_sum_exec_runtime = se->sum_exec_runtime;
 800}
 801
 802static int
 803wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
 804
 805static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
 806{
 807        struct sched_entity *se = __pick_next_entity(cfs_rq);
 808
 809        if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, se) < 1)
 810                return cfs_rq->next;
 811
 812        if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, se) < 1)
 813                return cfs_rq->last;
 814
 815        return se;
 816}
 817
 818static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
 819{
 820        /*
 821         * If still on the runqueue then deactivate_task()
 822         * was not called and update_curr() has to be done:
 823         */
 824        if (prev->on_rq)
 825                update_curr(cfs_rq);
 826
 827        check_spread(cfs_rq, prev);
 828        if (prev->on_rq) {
 829                update_stats_wait_start(cfs_rq, prev);
 830                /* Put 'current' back into the tree. */
 831                __enqueue_entity(cfs_rq, prev);
 832        }
 833        cfs_rq->curr = NULL;
 834}
 835
 836static void
 837entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
 838{
 839        /*
 840         * Update run-time statistics of the 'current'.
 841         */
 842        update_curr(cfs_rq);
 843
 844#ifdef CONFIG_SCHED_HRTICK
 845        /*
 846         * queued ticks are scheduled to match the slice, so don't bother
 847         * validating it and just reschedule.
 848         */
 849        if (queued) {
 850                resched_task(rq_of(cfs_rq)->curr);
 851                return;
 852        }
 853        /*
 854         * don't let the period tick interfere with the hrtick preemption
 855         */
 856        if (!sched_feat(DOUBLE_TICK) &&
 857                        hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
 858                return;
 859#endif
 860
 861        if (cfs_rq->nr_running > 1 || !sched_feat(WAKEUP_PREEMPT))
 862                check_preempt_tick(cfs_rq, curr);
 863}
 864
 865/**************************************************
 866 * CFS operations on tasks:
 867 */
 868
 869#ifdef CONFIG_SCHED_HRTICK
 870static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
 871{
 872        struct sched_entity *se = &p->se;
 873        struct cfs_rq *cfs_rq = cfs_rq_of(se);
 874
 875        WARN_ON(task_rq(p) != rq);
 876
 877        if (hrtick_enabled(rq) && cfs_rq->nr_running > 1) {
 878                u64 slice = sched_slice(cfs_rq, se);
 879                u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
 880                s64 delta = slice - ran;
 881
 882                if (delta < 0) {
 883                        if (rq->curr == p)
 884                                resched_task(p);
 885                        return;
 886                }
 887
 888                /*
 889                 * Don't schedule slices shorter than 10000ns, that just
 890                 * doesn't make sense. Rely on vruntime for fairness.
 891                 */
 892                if (rq->curr != p)
 893                        delta = max_t(s64, 10000LL, delta);
 894
 895                hrtick_start(rq, delta);
 896        }
 897}
 898
 899/*
 900 * called from enqueue/dequeue and updates the hrtick when the
 901 * current task is from our class and nr_running is low enough
 902 * to matter.
 903 */
 904static void hrtick_update(struct rq *rq)
 905{
 906        struct task_struct *curr = rq->curr;
 907
 908        if (curr->sched_class != &fair_sched_class)
 909                return;
 910
 911        if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
 912                hrtick_start_fair(rq, curr);
 913}
 914#else /* !CONFIG_SCHED_HRTICK */
 915static inline void
 916hrtick_start_fair(struct rq *rq, struct task_struct *p)
 917{
 918}
 919
 920static inline void hrtick_update(struct rq *rq)
 921{
 922}
 923#endif
 924
 925/*
 926 * The enqueue_task method is called before nr_running is
 927 * increased. Here we update the fair scheduling stats and
 928 * then put the task into the rbtree:
 929 */
 930static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
 931{
 932        struct cfs_rq *cfs_rq;
 933        struct sched_entity *se = &p->se;
 934
 935        for_each_sched_entity(se) {
 936                if (se->on_rq)
 937                        break;
 938                cfs_rq = cfs_rq_of(se);
 939                enqueue_entity(cfs_rq, se, wakeup);
 940                wakeup = 1;
 941        }
 942
 943        hrtick_update(rq);
 944}
 945
 946/*
 947 * The dequeue_task method is called before nr_running is
 948 * decreased. We remove the task from the rbtree and
 949 * update the fair scheduling stats:
 950 */
 951static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
 952{
 953        struct cfs_rq *cfs_rq;
 954        struct sched_entity *se = &p->se;
 955
 956        for_each_sched_entity(se) {
 957                cfs_rq = cfs_rq_of(se);
 958                dequeue_entity(cfs_rq, se, sleep);
 959                /* Don't dequeue parent if it has other entities besides us */
 960                if (cfs_rq->load.weight)
 961                        break;
 962                sleep = 1;
 963        }
 964
 965        hrtick_update(rq);
 966}
 967
 968/*
 969 * sched_yield() support is very simple - we dequeue and enqueue.
 970 *
 971 * If compat_yield is turned on then we requeue to the end of the tree.
 972 */
 973static void yield_task_fair(struct rq *rq)
 974{
 975        struct task_struct *curr = rq->curr;
 976        struct cfs_rq *cfs_rq = task_cfs_rq(curr);
 977        struct sched_entity *rightmost, *se = &curr->se;
 978
 979        /*
 980         * Are we the only task in the tree?
 981         */
 982        if (unlikely(cfs_rq->nr_running == 1))
 983                return;
 984
 985        clear_buddies(cfs_rq, se);
 986
 987        if (likely(!sysctl_sched_compat_yield) && curr->policy != SCHED_BATCH) {
 988                update_rq_clock(rq);
 989                /*
 990                 * Update run-time statistics of the 'current'.
 991                 */
 992                update_curr(cfs_rq);
 993
 994                return;
 995        }
 996        /*
 997         * Find the rightmost entry in the rbtree:
 998         */
 999        rightmost = __pick_last_entity(cfs_rq);
1000        /*
1001         * Already in the rightmost position?
1002         */
1003        if (unlikely(!rightmost || rightmost->vruntime < se->vruntime))
1004                return;
1005
1006        /*
1007         * Minimally necessary key value to be last in the tree:
1008         * Upon rescheduling, sched_class::put_prev_task() will place
1009         * 'current' within the tree based on its new key value.
1010         */
1011        se->vruntime = rightmost->vruntime + 1;
1012}
1013
1014/*
1015 * wake_idle() will wake a task on an idle cpu if task->cpu is
1016 * not idle and an idle cpu is available.  The span of cpus to
1017 * search starts with cpus closest then further out as needed,
1018 * so we always favor a closer, idle cpu.
1019 * Domains may include CPUs that are not usable for migration,
1020 * hence we need to mask them out (cpu_active_map)
1021 *
1022 * Returns the CPU we should wake onto.
1023 */
1024#if defined(ARCH_HAS_SCHED_WAKE_IDLE)
1025static int wake_idle(int cpu, struct task_struct *p)
1026{
1027        cpumask_t tmp;
1028        struct sched_domain *sd;
1029        int i;
1030
1031        /*
1032         * If it is idle, then it is the best cpu to run this task.
1033         *
1034         * This cpu is also the best, if it has more than one task already.
1035         * Siblings must also be busy (in most cases) as they didn't already
1036         * pick up the extra load from this cpu and hence we need not check
1037         * sibling runqueue info. This will avoid the checks and cache miss
1038         * penalties associated with that.
1039         */
1040        if (idle_cpu(cpu) || cpu_rq(cpu)->cfs.nr_running > 1)
1041                return cpu;
1042
1043        for_each_domain(cpu, sd) {
1044                if ((sd->flags & SD_WAKE_IDLE)
1045                    || ((sd->flags & SD_WAKE_IDLE_FAR)
1046                        && !task_hot(p, task_rq(p)->clock, sd))) {
1047                        cpus_and(tmp, sd->span, p->cpus_allowed);
1048                        cpus_and(tmp, tmp, cpu_active_map);
1049                        for_each_cpu_mask_nr(i, tmp) {
1050                                if (idle_cpu(i)) {
1051                                        if (i != task_cpu(p)) {
1052                                                schedstat_inc(p,
1053                                                       se.nr_wakeups_idle);
1054                                        }
1055                                        return i;
1056                                }
1057                        }
1058                } else {
1059                        break;
1060                }
1061        }
1062        return cpu;
1063}
1064#else /* !ARCH_HAS_SCHED_WAKE_IDLE*/
1065static inline int wake_idle(int cpu, struct task_struct *p)
1066{
1067        return cpu;
1068}
1069#endif
1070
1071#ifdef CONFIG_SMP
1072
1073#ifdef CONFIG_FAIR_GROUP_SCHED
1074/*
1075 * effective_load() calculates the load change as seen from the root_task_group
1076 *
1077 * Adding load to a group doesn't make a group heavier, but can cause movement
1078 * of group shares between cpus. Assuming the shares were perfectly aligned one
1079 * can calculate the shift in shares.
1080 *
1081 * The problem is that perfectly aligning the shares is rather expensive, hence
1082 * we try to avoid doing that too often - see update_shares(), which ratelimits
1083 * this change.
1084 *
1085 * We compensate this by not only taking the current delta into account, but
1086 * also considering the delta between when the shares were last adjusted and
1087 * now.
1088 *
1089 * We still saw a performance dip; some tracing taught us that between
1090 * cgroup:/ and cgroup:/foo balancing the number of affine wakeups increased
1091 * significantly. Therefore try to bias the error in the direction of failing
1092 * the affine wakeup.
1093 *
1094 */
1095static long effective_load(struct task_group *tg, int cpu,
1096                long wl, long wg)
1097{
1098        struct sched_entity *se = tg->se[cpu];
1099
1100        if (!tg->parent)
1101                return wl;
1102
1103        /*
1104         * By not taking the decrease of shares on the other cpu into
1105         * account our error leans towards reducing the affine wakeups.
1106         */
1107        if (!wl && sched_feat(ASYM_EFF_LOAD))
1108                return wl;
1109
1110        for_each_sched_entity(se) {
1111                long S, rw, s, a, b;
1112                long more_w;
1113
1114                /*
1115                 * Instead of using this increment, also add the difference
1116                 * between when the shares were last updated and now.
1117                 */
1118                more_w = se->my_q->load.weight - se->my_q->rq_weight;
1119                wl += more_w;
1120                wg += more_w;
1121
1122                S = se->my_q->tg->shares;
1123                s = se->my_q->shares;
1124                rw = se->my_q->rq_weight;
1125
1126                a = S*(rw + wl);
1127                b = S*rw + s*wg;
1128
1129                wl = s*(a-b);
1130
1131                if (likely(b))
1132                        wl /= b;
1133
1134                /*
1135                 * Assume the group is already running and will
1136                 * thus already be accounted for in the weight.
1137                 *
1138                 * That is, moving shares between CPUs does not
1139                 * alter the group weight.
1140                 */
1141                wg = 0;
1142        }
1143
1144        return wl;
1145}
1146
1147#else
1148
1149static inline unsigned long effective_load(struct task_group *tg, int cpu,
1150                unsigned long wl, unsigned long wg)
1151{
1152        return wl;
1153}
1154
1155#endif
1156
1157static int
1158wake_affine(struct sched_domain *this_sd, struct rq *this_rq,
1159            struct task_struct *p, int prev_cpu, int this_cpu, int sync,
1160            int idx, unsigned long load, unsigned long this_load,
1161            unsigned int imbalance)
1162{
1163        struct task_struct *curr = this_rq->curr;
1164        struct task_group *tg;
1165        unsigned long tl = this_load;
1166        unsigned long tl_per_task;
1167        unsigned long weight;
1168        int balanced;
1169
1170        if (!(this_sd->flags & SD_WAKE_AFFINE) || !sched_feat(AFFINE_WAKEUPS))
1171                return 0;
1172
1173        if (sync && (curr->se.avg_overlap > sysctl_sched_migration_cost ||
1174                        p->se.avg_overlap > sysctl_sched_migration_cost))
1175                sync = 0;
1176
1177        /*
1178         * If sync wakeup then subtract the (maximum possible)
1179         * effect of the currently running task from the load
1180         * of the current CPU:
1181         */
1182        if (sync) {
1183                tg = task_group(current);
1184                weight = current->se.load.weight;
1185
1186                tl += effective_load(tg, this_cpu, -weight, -weight);
1187                load += effective_load(tg, prev_cpu, 0, -weight);
1188        }
1189
1190        tg = task_group(p);
1191        weight = p->se.load.weight;
1192
1193        balanced = 100*(tl + effective_load(tg, this_cpu, weight, weight)) <=
1194                imbalance*(load + effective_load(tg, prev_cpu, 0, weight));
1195
1196        /*
1197         * If the currently running task will sleep within
1198         * a reasonable amount of time then attract this newly
1199         * woken task:
1200         */
1201        if (sync && balanced)
1202                return 1;
1203
1204        schedstat_inc(p, se.nr_wakeups_affine_attempts);
1205        tl_per_task = cpu_avg_load_per_task(this_cpu);
1206
1207        if (balanced || (tl <= load && tl + target_load(prev_cpu, idx) <=
1208                        tl_per_task)) {
1209                /*
1210                 * This domain has SD_WAKE_AFFINE and
1211                 * p is cache cold in this domain, and
1212                 * there is no bad imbalance.
1213                 */
1214                schedstat_inc(this_sd, ttwu_move_affine);
1215                schedstat_inc(p, se.nr_wakeups_affine);
1216
1217                return 1;
1218        }
1219        return 0;
1220}
1221
1222static int select_task_rq_fair(struct task_struct *p, int sync)
1223{
1224        struct sched_domain *sd, *this_sd = NULL;
1225        int prev_cpu, this_cpu, new_cpu;
1226        unsigned long load, this_load;
1227        struct rq *this_rq;
1228        unsigned int imbalance;
1229        int idx;
1230
1231        prev_cpu        = task_cpu(p);
1232        this_cpu        = smp_processor_id();
1233        this_rq                = cpu_rq(this_cpu);
1234        new_cpu                = prev_cpu;
1235
1236        if (prev_cpu == this_cpu)
1237                goto out;
1238        /*
1239         * 'this_sd' is the first domain that both
1240         * this_cpu and prev_cpu are present in:
1241         */
1242        for_each_domain(this_cpu, sd) {
1243                if (cpu_isset(prev_cpu, sd->span)) {
1244                        this_sd = sd;
1245                        break;
1246                }
1247        }
1248
1249        if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed)))
1250                goto out;
1251
1252        /*
1253         * Check for affine wakeup and passive balancing possibilities.
1254         */
1255        if (!this_sd)
1256                goto out;
1257
1258        idx = this_sd->wake_idx;
1259
1260        imbalance = 100 + (this_sd->imbalance_pct - 100) / 2;
1261
1262        load = source_load(prev_cpu, idx);
1263        this_load = target_load(this_cpu, idx);
1264
1265        if (wake_affine(this_sd, this_rq, p, prev_cpu, this_cpu, sync, idx,
1266                                     load, this_load, imbalance))
1267                return this_cpu;
1268
1269        /*
1270         * Start passive balancing when half the imbalance_pct
1271         * limit is reached.
1272         */
1273        if (this_sd->flags & SD_WAKE_BALANCE) {
1274                if (imbalance*this_load <= 100*load) {
1275                        schedstat_inc(this_sd, ttwu_move_balance);
1276                        schedstat_inc(p, se.nr_wakeups_passive);
1277                        return this_cpu;
1278                }
1279        }
1280
1281out:
1282        return wake_idle(new_cpu, p);
1283}
1284#endif /* CONFIG_SMP */
1285
1286static unsigned long wakeup_gran(struct sched_entity *se)
1287{
1288        unsigned long gran = sysctl_sched_wakeup_granularity;
1289
1290        /*
1291         * More easily preempt - nice tasks, while not making it harder for
1292         * + nice tasks.
1293         */
1294        if (!sched_feat(ASYM_GRAN) || se->load.weight > NICE_0_LOAD)
1295                gran = calc_delta_fair(sysctl_sched_wakeup_granularity, se);
1296
1297        return gran;
1298}
1299
1300/*
1301 * Should 'se' preempt 'curr'.
1302 *
1303 *             |s1
1304 *        |s2
1305 *   |s3
1306 *         g
1307 *      |<--->|c
1308 *
1309 *  w(c, s1) = -1
1310 *  w(c, s2) =  0
1311 *  w(c, s3) =  1
1312 *
1313 */
1314static int
1315wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
1316{
1317        s64 gran, vdiff = curr->vruntime - se->vruntime;
1318
1319        if (vdiff <= 0)
1320                return -1;
1321
1322        gran = wakeup_gran(curr);
1323        if (vdiff > gran)
1324                return 1;
1325
1326        return 0;
1327}
1328
1329static void set_last_buddy(struct sched_entity *se)
1330{
1331        for_each_sched_entity(se)
1332                cfs_rq_of(se)->last = se;
1333}
1334
1335static void set_next_buddy(struct sched_entity *se)
1336{
1337        for_each_sched_entity(se)
1338                cfs_rq_of(se)->next = se;
1339}
1340
1341/*
1342 * Preempt the current task with a newly woken task if needed:
1343 */
1344static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
1345{
1346        struct task_struct *curr = rq->curr;
1347        struct sched_entity *se = &curr->se, *pse = &p->se;
1348
1349        if (unlikely(rt_prio(p->prio))) {
1350                struct cfs_rq *cfs_rq = task_cfs_rq(curr);
1351
1352                update_rq_clock(rq);
1353                update_curr(cfs_rq);
1354                resched_task(curr);
1355                return;
1356        }
1357
1358        if (unlikely(p->sched_class != &fair_sched_class))
1359                return;
1360
1361        if (unlikely(se == pse))
1362                return;
1363
1364        /*
1365         * Only set the backward buddy when the current task is still on the
1366         * rq. This can happen when a wakeup gets interleaved with schedule on
1367         * the ->pre_schedule() or idle_balance() point, either of which can
1368         * drop the rq lock.
1369         *
1370         * Also, during early boot the idle thread is in the fair class, for
1371         * obvious reasons it's a bad idea to schedule back to the idle thread.
1372         */
1373        if (sched_feat(LAST_BUDDY) && likely(se->on_rq && curr != rq->idle))
1374                set_last_buddy(se);
1375        set_next_buddy(pse);
1376
1377        /*
1378         * We can come here with TIF_NEED_RESCHED already set from new task
1379         * wake up path.
1380         */
1381        if (test_tsk_need_resched(curr))
1382                return;
1383
1384        /*
1385         * Batch tasks do not preempt (their preemption is driven by
1386         * the tick):
1387         */
1388        if (unlikely(p->policy == SCHED_BATCH))
1389                return;
1390
1391        if (!sched_feat(WAKEUP_PREEMPT))
1392                return;
1393
1394        if (sched_feat(WAKEUP_OVERLAP) && (sync ||
1395                        (se->avg_overlap < sysctl_sched_migration_cost &&
1396                         pse->avg_overlap < sysctl_sched_migration_cost))) {
1397                resched_task(curr);
1398                return;
1399        }
1400
1401        find_matching_se(&se, &pse);
1402
1403        while (se) {
1404                BUG_ON(!pse);
1405
1406                if (wakeup_preempt_entity(se, pse) == 1) {
1407                        resched_task(curr);
1408                        break;
1409                }
1410
1411                se = parent_entity(se);
1412                pse = parent_entity(pse);
1413        }
1414}
1415
1416static struct task_struct *pick_next_task_fair(struct rq *rq)
1417{
1418        struct task_struct *p;
1419        struct cfs_rq *cfs_rq = &rq->cfs;
1420        struct sched_entity *se;
1421
1422        if (unlikely(!cfs_rq->nr_running))
1423                return NULL;
1424
1425        do {
1426                se = pick_next_entity(cfs_rq);
1427                set_next_entity(cfs_rq, se);
1428                cfs_rq = group_cfs_rq(se);
1429        } while (cfs_rq);
1430
1431        p = task_of(se);
1432        hrtick_start_fair(rq, p);
1433
1434        return p;
1435}
1436
1437/*
1438 * Account for a descheduled task:
1439 */
1440static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
1441{
1442        struct sched_entity *se = &prev->se;
1443        struct cfs_rq *cfs_rq;
1444
1445        for_each_sched_entity(se) {
1446                cfs_rq = cfs_rq_of(se);
1447                put_prev_entity(cfs_rq, se);
1448        }
1449}
1450
1451#ifdef CONFIG_SMP
1452/**************************************************
1453 * Fair scheduling class load-balancing methods:
1454 */
1455
1456/*
1457 * Load-balancing iterator. Note: while the runqueue stays locked
1458 * during the whole iteration, the current task might be
1459 * dequeued so the iterator has to be dequeue-safe. Here we
1460 * achieve that by always pre-iterating before returning
1461 * the current task:
1462 */
1463static struct task_struct *
1464__load_balance_iterator(struct cfs_rq *cfs_rq, struct list_head *next)
1465{
1466        struct task_struct *p = NULL;
1467        struct sched_entity *se;
1468
1469        if (next == &cfs_rq->tasks)
1470                return NULL;
1471
1472        se = list_entry(next, struct sched_entity, group_node);
1473        p = task_of(se);
1474        cfs_rq->balance_iterator = next->next;
1475
1476        return p;
1477}
1478
1479static struct task_struct *load_balance_start_fair(void *arg)
1480{
1481        struct cfs_rq *cfs_rq = arg;
1482
1483        return __load_balance_iterator(cfs_rq, cfs_rq->tasks.next);
1484}
1485
1486static struct task_struct *load_balance_next_fair(void *arg)
1487{
1488        struct cfs_rq *cfs_rq = arg;
1489
1490        return __load_balance_iterator(cfs_rq, cfs_rq->balance_iterator);
1491}
1492
1493static unsigned long
1494__load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
1495                unsigned long max_load_move, struct sched_domain *sd,
1496                enum cpu_idle_type idle, int *all_pinned, int *this_best_prio,
1497                struct cfs_rq *cfs_rq)
1498{
1499        struct rq_iterator cfs_rq_iterator;
1500
1501        cfs_rq_iterator.start = load_balance_start_fair;
1502        cfs_rq_iterator.next = load_balance_next_fair;
1503        cfs_rq_iterator.arg = cfs_rq;
1504
1505        return balance_tasks(this_rq, this_cpu, busiest,
1506                        max_load_move, sd, idle, all_pinned,
1507                        this_best_prio, &cfs_rq_iterator);
1508}
1509
1510#ifdef CONFIG_FAIR_GROUP_SCHED
1511static unsigned long
1512load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
1513                  unsigned long max_load_move,
1514                  struct sched_domain *sd, enum cpu_idle_type idle,
1515                  int *all_pinned, int *this_best_prio)
1516{
1517        long rem_load_move = max_load_move;
1518        int busiest_cpu = cpu_of(busiest);
1519        struct task_group *tg;
1520
1521        rcu_read_lock();
1522        update_h_load(busiest_cpu);
1523
1524        list_for_each_entry_rcu(tg, &task_groups, list) {
1525                struct cfs_rq *busiest_cfs_rq = tg->cfs_rq[busiest_cpu];
1526                unsigned long busiest_h_load = busiest_cfs_rq->h_load;
1527                unsigned long busiest_weight = busiest_cfs_rq->load.weight;
1528                u64 rem_load, moved_load;
1529
1530                /*
1531                 * empty group
1532                 */
1533                if (!busiest_cfs_rq->task_weight)
1534                        continue;
1535
1536                rem_load = (u64)rem_load_move * busiest_weight;
1537                rem_load = div_u64(rem_load, busiest_h_load + 1);
1538
1539                moved_load = __load_balance_fair(this_rq, this_cpu, busiest,
1540                                rem_load, sd, idle, all_pinned, this_best_prio,
1541                                tg->cfs_rq[busiest_cpu]);
1542
1543                if (!moved_load)
1544                        continue;
1545
1546                moved_load *= busiest_h_load;
1547                moved_load = div_u64(moved_load, busiest_weight + 1);
1548
1549                rem_load_move -= moved_load;
1550                if (rem_load_move < 0)
1551                        break;
1552        }
1553        rcu_read_unlock();
1554
1555        return max_load_move - rem_load_move;
1556}
1557#else
1558static unsigned long
1559load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
1560                  unsigned long max_load_move,
1561                  struct sched_domain *sd, enum cpu_idle_type idle,
1562                  int *all_pinned, int *this_best_prio)
1563{
1564        return __load_balance_fair(this_rq, this_cpu, busiest,
1565                        max_load_move, sd, idle, all_pinned,
1566                        this_best_prio, &busiest->cfs);
1567}
1568#endif
1569
1570static int
1571move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
1572                   struct sched_domain *sd, enum cpu_idle_type idle)
1573{
1574        struct cfs_rq *busy_cfs_rq;
1575        struct rq_iterator cfs_rq_iterator;
1576
1577        cfs_rq_iterator.start = load_balance_start_fair;
1578        cfs_rq_iterator.next = load_balance_next_fair;
1579
1580        for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
1581                /*
1582                 * pass busy_cfs_rq argument into
1583                 * load_balance_[start|next]_fair iterators
1584                 */
1585                cfs_rq_iterator.arg = busy_cfs_rq;
1586                if (iter_move_one_task(this_rq, this_cpu, busiest, sd, idle,
1587                                       &cfs_rq_iterator))
1588                    return 1;
1589        }
1590
1591        return 0;
1592}
1593#endif /* CONFIG_SMP */
1594
1595/*
1596 * scheduler tick hitting a task of our scheduling class:
1597 */
1598static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
1599{
1600        struct cfs_rq *cfs_rq;
1601        struct sched_entity *se = &curr->se;
1602
1603        for_each_sched_entity(se) {
1604                cfs_rq = cfs_rq_of(se);
1605                entity_tick(cfs_rq, se, queued);
1606        }
1607}
1608
1609#define swap(a, b) do { typeof(a) tmp = (a); (a) = (b); (b) = tmp; } while (0)
1610
1611/*
1612 * Share the fairness runtime between parent and child, thus the
1613 * total amount of pressure for CPU stays equal - new tasks
1614 * get a chance to run but frequent forkers are not allowed to
1615 * monopolize the CPU. Note: the parent runqueue is locked,
1616 * the child is not running yet.
1617 */
1618static void task_new_fair(struct rq *rq, struct task_struct *p)
1619{
1620        struct cfs_rq *cfs_rq = task_cfs_rq(p);
1621        struct sched_entity *se = &p->se, *curr = cfs_rq->curr;
1622        int this_cpu = smp_processor_id();
1623
1624        sched_info_queued(p);
1625
1626        update_curr(cfs_rq);
1627        place_entity(cfs_rq, se, 1);
1628
1629        /* 'curr' will be NULL if the child belongs to a different group */
1630        if (sysctl_sched_child_runs_first && this_cpu == task_cpu(p) &&
1631                        curr && curr->vruntime < se->vruntime) {
1632                /*
1633                 * Upon rescheduling, sched_class::put_prev_task() will place
1634                 * 'current' within the tree based on its new key value.
1635                 */
1636                swap(curr->vruntime, se->vruntime);
1637                resched_task(rq->curr);
1638        }
1639
1640        enqueue_task_fair(rq, p, 0);
1641}
1642
1643/*
1644 * Priority of the task has changed. Check to see if we preempt
1645 * the current task.
1646 */
1647static void prio_changed_fair(struct rq *rq, struct task_struct *p,
1648                              int oldprio, int running)
1649{
1650        /*
1651         * Reschedule if we are currently running on this runqueue and
1652         * our priority decreased, or if we are not currently running on
1653         * this runqueue and our priority is higher than the current's
1654         */
1655        if (running) {
1656                if (p->prio > oldprio)
1657                        resched_task(rq->curr);
1658        } else
1659                check_preempt_curr(rq, p, 0);
1660}
1661
1662/*
1663 * We switched to the sched_fair class.
1664 */
1665static void switched_to_fair(struct rq *rq, struct task_struct *p,
1666                             int running)
1667{
1668        /*
1669         * We were most likely switched from sched_rt, so
1670         * kick off the schedule if running, otherwise just see
1671         * if we can still preempt the current task.
1672         */
1673        if (running)
1674                resched_task(rq->curr);
1675        else
1676                check_preempt_curr(rq, p, 0);
1677}
1678
1679/* Account for a task changing its policy or group.
1680 *
1681 * This routine is mostly called to set cfs_rq->curr field when a task
1682 * migrates between groups/classes.
1683 */
1684static void set_curr_task_fair(struct rq *rq)
1685{
1686        struct sched_entity *se = &rq->curr->se;
1687
1688        for_each_sched_entity(se)
1689                set_next_entity(cfs_rq_of(se), se);
1690}
1691
1692#ifdef CONFIG_FAIR_GROUP_SCHED
1693static void moved_group_fair(struct task_struct *p)
1694{
1695        struct cfs_rq *cfs_rq = task_cfs_rq(p);
1696
1697        update_curr(cfs_rq);
1698        place_entity(cfs_rq, &p->se, 1);
1699}
1700#endif
1701
1702/*
1703 * All the scheduling class methods:
1704 */
1705static const struct sched_class fair_sched_class = {
1706        .next                        = &idle_sched_class,
1707        .enqueue_task                = enqueue_task_fair,
1708        .dequeue_task                = dequeue_task_fair,
1709        .yield_task                = yield_task_fair,
1710
1711        .check_preempt_curr        = check_preempt_wakeup,
1712
1713        .pick_next_task                = pick_next_task_fair,
1714        .put_prev_task                = put_prev_task_fair,
1715
1716#ifdef CONFIG_SMP
1717        .select_task_rq                = select_task_rq_fair,
1718
1719        .load_balance                = load_balance_fair,
1720        .move_one_task                = move_one_task_fair,
1721#endif
1722
1723        .set_curr_task          = set_curr_task_fair,
1724        .task_tick                = task_tick_fair,
1725        .task_new                = task_new_fair,
1726
1727        .prio_changed                = prio_changed_fair,
1728        .switched_to                = switched_to_fair,
1729
1730#ifdef CONFIG_FAIR_GROUP_SCHED
1731        .moved_group                = moved_group_fair,
1732#endif
1733};
1734
1735#ifdef CONFIG_SCHED_DEBUG
1736static void print_cfs_stats(struct seq_file *m, int cpu)
1737{
1738        struct cfs_rq *cfs_rq;
1739
1740        rcu_read_lock();
1741        for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
1742                print_cfs_rq(m, cpu, cfs_rq);
1743        rcu_read_unlock();
1744}
1745#endif
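
For reference, the reported location (line 1555) is the final
"return max_load_move - rem_load_move;" of load_balance_fair(), and the only lock
taken inside that function in the listing above is the RCU read lock: rcu_read_lock()
at line 1521 and rcu_read_unlock() at line 1553 bracket the task-group walk. A minimal
sketch of that control-flow shape (hypothetical names, not the kernel code above)
shows that both "continue" and "break" inside such a loop stay on the path that
reaches the unlock before the single return:

    #include <linux/rcupdate.h>
    #include <linux/list.h>

    /* Illustrative only -- a hypothetical RCU-protected list walk. */
    struct item {
            struct list_head node;
            long weight;
    };

    static LIST_HEAD(items);

    static long sum_weights(long budget)
    {
            struct item *it;
            long total = 0;

            rcu_read_lock();
            list_for_each_entry_rcu(it, &items, node) {
                    if (!it->weight)
                            continue;       /* skip an entry; read lock stays held */
                    total += it->weight;
                    if (total > budget)
                            break;          /* leave the loop; still reaches the unlock */
            }
            rcu_read_unlock();              /* dropped before the single return */

            return total;
    }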