/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 */
#include <linux/latencytop.h>

/*
 * Targeted preemption latency for CPU-bound tasks:
 * (default: 20ms, units: nanoseconds)
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length
 * and have no persistent notion like in traditional, time-slice
 * based scheduling concepts.
 */
unsigned int sysctl_sched_latency = 20000000ULL;

/*
 * Minimal preemption granularity for CPU-bound tasks:
 * (default: 4 msec, units: nanoseconds)
 */
unsigned int sysctl_sched_min_granularity = 4000000ULL;

/*
 * is kept at sysctl_sched_latency / sysctl_sched_min_granularity
 */
static unsigned int sched_nr_latency = 5;

/*
 * After fork, child runs first. (default) If set to 0 (via sysctl)
 * then parent will (try to) run first.
 */
const_debug unsigned int sysctl_sched_child_runs_first = 1;

/*
 * sys_sched_yield() compat mode
 *
 * This option switches the aggressive yield implementation of the
 * old scheduler back on.
 */
unsigned int __read_mostly sysctl_sched_compat_yield;

/*
 * SCHED_OTHER wake-up granularity.
 * (default: 5 msec, units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
unsigned int sysctl_sched_wakeup_granularity = 5000000UL;

const_debug unsigned int sysctl_sched_migration_cost = 500000UL;

static const struct sched_class fair_sched_class;

/**************************************************************
 * CFS operations on generic schedulable entities:
 */

static inline struct task_struct *task_of(struct sched_entity *se)
{
	return container_of(se, struct task_struct, se);
}

#ifdef CONFIG_FAIR_GROUP_SCHED

/* cpu runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return cfs_rq->rq;
}

/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se)	(!se->my_q)

/* Walk up scheduling entities hierarchy */
#define for_each_sched_entity(se) \
		for (; se; se = se->parent)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return p->se.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	return se->cfs_rq;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return grp->my_q;
}

/*
 * Given a group's cfs_rq on one cpu, return its corresponding cfs_rq on
 * another cpu ('this_cpu'):
 */
static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
{
	return cfs_rq->tg->cfs_rq[this_cpu];
}

/* Iterate through all leaf cfs_rq's on a runqueue: */
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
	list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)

/* Do the two (enqueued) entities belong to the same group ? */
static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
	if (se->cfs_rq == pse->cfs_rq)
		return 1;

	return 0;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return se->parent;
}

/* return depth at which a sched entity is present in the hierarchy */
static inline int depth_se(struct sched_entity *se)
{
	int depth = 0;

	for_each_sched_entity(se)
		depth++;

	return depth;
}

static void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
	int se_depth, pse_depth;

	/*
	 * The preemption test can only be made between sibling entities
	 * that are in the same cfs_rq, i.e. that have a common parent.
	 * Walk up the hierarchy of both entities until we find their
	 * ancestors that are siblings of a common parent.
	 */

	/* First walk up until both entities are at the same depth: */
	se_depth = depth_se(*se);
	pse_depth = depth_se(*pse);

	while (se_depth > pse_depth) {
		se_depth--;
		*se = parent_entity(*se);
	}

	while (pse_depth > se_depth) {
		pse_depth--;
		*pse = parent_entity(*pse);
	}

	while (!is_same_group(*se, *pse)) {
		*se = parent_entity(*se);
		*pse = parent_entity(*pse);
	}
}

#else	/* !CONFIG_FAIR_GROUP_SCHED */

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return container_of(cfs_rq, struct rq, cfs);
}

#define entity_is_task(se)	1

#define for_each_sched_entity(se) \
		for (; se; se = NULL)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return &task_rq(p)->cfs;
}

static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	struct task_struct *p = task_of(se);
	struct rq *rq = task_rq(p);

	return &rq->cfs;
}

/* a task entity never "owns" a group runqueue */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return NULL;
}

static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
{
	return &cpu_rq(this_cpu)->cfs;
}

#define for_each_leaf_cfs_rq(rq, cfs_rq) \
		for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)

static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
	return 1;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return NULL;
}

static inline void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
}

#endif	/* CONFIG_FAIR_GROUP_SCHED */

/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */

static inline u64 max_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta > 0)
		min_vruntime = vruntime;

	return min_vruntime;
}

static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta < 0)
		min_vruntime = vruntime;

	return min_vruntime;
}
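
/*
 * Note on the (s64) casts above (a reading aid, not original
 * documentation): vruntime values are unsigned and may wrap around, so
 * ordering is decided by the sign of the difference rather than by a
 * direct comparison. For example, if min_vruntime == ULLONG_MAX - 5 and
 * vruntime == 10 (i.e. vruntime has wrapped), then
 * (s64)(vruntime - min_vruntime) == 16 > 0, so max_vruntime() correctly
 * treats the wrapped value as the later one.
 */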

static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	return se->vruntime - cfs_rq->min_vruntime;
}

static void update_min_vruntime(struct cfs_rq *cfs_rq)
{
	u64 vruntime = cfs_rq->min_vruntime;

	if (cfs_rq->curr)
		vruntime = cfs_rq->curr->vruntime;

	if (cfs_rq->rb_leftmost) {
		struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
						   struct sched_entity,
						   run_node);

		if (vruntime == cfs_rq->min_vruntime)
			vruntime = se->vruntime;
		else
			vruntime = min_vruntime(vruntime, se->vruntime);
	}

	/* min_vruntime only ever moves forward: */
	cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
}

/*
 * Enqueue an entity into the rb-tree:
 */
static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
	struct rb_node *parent = NULL;
	struct sched_entity *entry;
	s64 key = entity_key(cfs_rq, se);
	int leftmost = 1;

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct sched_entity, run_node);

		/*
		 * We dont care about collisions. Nodes with
		 * the same key stay together.
		 */
		if (key < entity_key(cfs_rq, entry)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	/*
	 * Maintain a cache of leftmost tree entries (it is frequently
	 * used):
	 */
	if (leftmost)
		cfs_rq->rb_leftmost = &se->run_node;

	rb_link_node(&se->run_node, parent, link);
	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
}
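
/*
 * A short note on the timeline ordering (a reading aid, not original
 * documentation): entities are keyed by vruntime - min_vruntime, so the
 * leftmost node is always the entity that has received the least
 * weighted CPU time and is next in line to run. Caching rb_leftmost
 * makes that lookup O(1), while insertion and removal stay O(log n).
 */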

static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	if (cfs_rq->rb_leftmost == &se->run_node) {
		struct rb_node *next_node;

		next_node = rb_next(&se->run_node);
		cfs_rq->rb_leftmost = next_node;
	}

	rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
}

static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node *left = cfs_rq->rb_leftmost;

	if (!left)
		return NULL;

	return rb_entry(left, struct sched_entity, run_node);
}

static struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);

	if (!last)
		return NULL;

	return rb_entry(last, struct sched_entity, run_node);
}

/**************************************************************
 * Scheduling class statistics methods:
 */

#ifdef CONFIG_SCHED_DEBUG
int sched_nr_latency_handler(struct ctl_table *table, int write,
		struct file *filp, void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);

	if (ret || !write)
		return ret;

	sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
					sysctl_sched_min_granularity);

	return 0;
}
#endif

/*
 * delta *= P[w / rw]: scale @delta by this entity's share of its
 * runqueue's load, at every level of the hierarchy.
 */
static inline unsigned long
calc_delta_weight(unsigned long delta, struct sched_entity *se)
{
	for_each_sched_entity(se) {
		delta = calc_delta_mine(delta,
				se->load.weight, &cfs_rq_of(se)->load);
	}

	return delta;
}

/*
 * delta /= w: convert @delta to "virtual time" by scaling it with
 * NICE_0_LOAD / se->load.weight (a no-op for nice-0 entities).
 */
static inline unsigned long
calc_delta_fair(unsigned long delta, struct sched_entity *se)
{
	if (unlikely(se->load.weight != NICE_0_LOAD))
		delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);

	return delta;
}
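
/*
 * Worked example (illustrative, assuming NICE_0_LOAD == 1024): a task
 * with twice the nice-0 weight (se->load.weight == 2048) that runs for
 * 10ms of wall time accrues calc_delta_fair(10ms) == 10ms * 1024/2048
 * == 5ms of vruntime. Heavier tasks age slower in virtual time and
 * therefore get proportionally more CPU.
 */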

/*
 * The idea is to set a period in which each task runs once.
 *
 * When there are too many tasks (sched_nr_latency) we have to stretch
 * the period because otherwise the slices get too small.
 *
 * p = (nr <= nl) ? l : l*nr/nl
 */
static u64 __sched_period(unsigned long nr_running)
{
	u64 period = sysctl_sched_latency;
	unsigned long nr_latency = sched_nr_latency;

	if (unlikely(nr_running > nr_latency)) {
		period = sysctl_sched_min_granularity;
		period *= nr_running;
	}

	return period;
}
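
/*
 * With the defaults above (latency 20ms, min granularity 4ms,
 * sched_nr_latency 5): 3 runnable tasks share one 20ms period, while
 * 8 runnable tasks stretch it to 8 * 4ms = 32ms, so that no slice
 * drops below the 4ms minimum.
 */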

/*
 * We calculate the wall-time slice from the period by taking a part
 * proportional to the weight:
 *
 * s = p*P[w/rw]
 */
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	unsigned long nr_running = cfs_rq->nr_running;

	if (unlikely(!se->on_rq))
		nr_running++;

	return calc_delta_weight(__sched_period(nr_running), se);
}

/*
 * We calculate the vruntime slice of a to-be-inserted task:
 *
 * vs = s/w = p/rw
 */
static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	return calc_delta_fair(sched_slice(cfs_rq, se), se);
}
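
/*
 * Illustrative numbers: with two runnable tasks of weights 1024 and
 * 2048 in a 20ms period, sched_slice() yields ~6.67ms and ~13.33ms of
 * wall time respectively, yet sched_vslice() is ~6.67ms for both - in
 * virtual time every entity's slice has the same length, which is what
 * keeps the timeline fair.
 */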

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static inline void
__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
	      unsigned long delta_exec)
{
	unsigned long delta_exec_weighted;

	schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));

	curr->sum_exec_runtime += delta_exec;
	schedstat_add(cfs_rq, exec_clock, delta_exec);
	delta_exec_weighted = calc_delta_fair(delta_exec, curr);
	curr->vruntime += delta_exec_weighted;
	update_min_vruntime(cfs_rq);
}

static void update_curr(struct cfs_rq *cfs_rq)
{
	struct sched_entity *curr = cfs_rq->curr;
	u64 now = rq_of(cfs_rq)->clock;
	unsigned long delta_exec;

	if (unlikely(!curr))
		return;

	/*
	 * Get the amount of time the current task was running
	 * since the last time we changed load (this cannot
	 * overflow on 32 bits):
	 */
	delta_exec = (unsigned long)(now - curr->exec_start);

	__update_curr(cfs_rq, curr, delta_exec);
	curr->exec_start = now;

	if (entity_is_task(curr)) {
		struct task_struct *curtask = task_of(curr);

		cpuacct_charge(curtask, delta_exec);
		account_group_exec_runtime(curtask, delta_exec);
	}
}

static inline void
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	schedstat_set(se->wait_start, rq_of(cfs_rq)->clock);
}

/*
 * Task is being enqueued - update stats:
 */
static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * Are we enqueueing a waiting task? (for current tasks
	 * a dequeue/enqueue event is a NOP)
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_start(cfs_rq, se);
}

static void
update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	schedstat_set(se->wait_max, max(se->wait_max,
			rq_of(cfs_rq)->clock - se->wait_start));
	schedstat_set(se->wait_count, se->wait_count + 1);
	schedstat_set(se->wait_sum, se->wait_sum +
			rq_of(cfs_rq)->clock - se->wait_start);
	schedstat_set(se->wait_start, 0);
}

static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * Mark the end of the wait period if dequeueing a
	 * waiting task:
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_end(cfs_rq, se);
}

/*
 * We are picking a new current task - update its stats:
 */
static inline void
update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * We are starting a new run period:
	 */
	se->exec_start = rq_of(cfs_rq)->clock;
}

/**************************************************
 * Scheduling class queueing methods:
 */

#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
static void
add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
{
	cfs_rq->task_weight += weight;
}
#else
static inline void
add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
{
}
#endif

static void
account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	update_load_add(&cfs_rq->load, se->load.weight);
	if (!parent_entity(se))
		inc_cpu_load(rq_of(cfs_rq), se->load.weight);
	if (entity_is_task(se)) {
		add_cfs_task_weight(cfs_rq, se->load.weight);
		list_add(&se->group_node, &cfs_rq->tasks);
	}
	cfs_rq->nr_running++;
	se->on_rq = 1;
}

static void
account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	update_load_sub(&cfs_rq->load, se->load.weight);
	if (!parent_entity(se))
		dec_cpu_load(rq_of(cfs_rq), se->load.weight);
	if (entity_is_task(se)) {
		add_cfs_task_weight(cfs_rq, -se->load.weight);
		list_del_init(&se->group_node);
	}
	cfs_rq->nr_running--;
	se->on_rq = 0;
}

static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
#ifdef CONFIG_SCHEDSTATS
	if (se->sleep_start) {
		u64 delta = rq_of(cfs_rq)->clock - se->sleep_start;
		struct task_struct *tsk = task_of(se);

		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > se->sleep_max))
			se->sleep_max = delta;

		se->sleep_start = 0;
		se->sum_sleep_runtime += delta;

		account_scheduler_latency(tsk, delta >> 10, 1);
	}
	if (se->block_start) {
		u64 delta = rq_of(cfs_rq)->clock - se->block_start;
		struct task_struct *tsk = task_of(se);

		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > se->block_max))
			se->block_max = delta;

		se->block_start = 0;
		se->sum_sleep_runtime += delta;

		/*
		 * Blocking time is in units of nanosecs, so shift by
		 * 20 to get a milliseconds-range estimation of the
		 * amount of time that the task spent sleeping:
		 */
		if (unlikely(prof_on == SLEEP_PROFILING)) {
			profile_hits(SLEEP_PROFILING, (void *)get_wchan(tsk),
				     delta >> 20);
		}
		account_scheduler_latency(tsk, delta >> 10, 0);
	}
#endif
}

static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
#ifdef CONFIG_SCHED_DEBUG
	s64 d = se->vruntime - cfs_rq->min_vruntime;

	if (d < 0)
		d = -d;

	if (d > 3*sysctl_sched_latency)
		schedstat_inc(cfs_rq, nr_spread_over);
#endif
}

static void
place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
{
	u64 vruntime = cfs_rq->min_vruntime;

	/*
	 * The 'current' period is already promised to the current
	 * tasks, however the extra weight of the new task will slow
	 * them down a little, place the new task so that it fits in
	 * the slot that stays open at the end.
	 */
	if (initial && sched_feat(START_DEBIT))
		vruntime += sched_vslice(cfs_rq, se);

	if (!initial) {
		/* sleeps upto a single latency don't count. */
		if (sched_feat(NEW_FAIR_SLEEPERS)) {
			unsigned long thresh = sysctl_sched_latency;

			/*
			 * convert the sleeper threshold into virtual time
			 */
			if (sched_feat(NORMALIZED_SLEEPER))
				thresh = calc_delta_fair(thresh, se);

			vruntime -= thresh;
		}

		/* ensure we never gain time by being placed backwards. */
		vruntime = max_vruntime(se->vruntime, vruntime);
	}

	se->vruntime = vruntime;
}
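
/*
 * To make the two cases above concrete (numbers purely illustrative):
 * with min_vruntime at 100ms, a freshly forked nice-0 task with a 5ms
 * vslice starts at 105ms under START_DEBIT (it pays for its new slot
 * up front), while a task waking from sleep is placed at
 * 100ms - 20ms = 80ms, but never below the vruntime it already had, so
 * short sleeps cannot be exploited to gain extra credit.
 */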

static void
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
{
	/*
	 * Update run-time statistics of the 'current'.
	 */
	update_curr(cfs_rq);
	account_entity_enqueue(cfs_rq, se);

	if (wakeup) {
		place_entity(cfs_rq, se, 0);
		enqueue_sleeper(cfs_rq, se);
	}

	update_stats_enqueue(cfs_rq, se);
	check_spread(cfs_rq, se);
	if (se != cfs_rq->curr)
		__enqueue_entity(cfs_rq, se);
}

static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	if (cfs_rq->last == se)
		cfs_rq->last = NULL;

	if (cfs_rq->next == se)
		cfs_rq->next = NULL;
}

static void
dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
{
	/*
	 * Update run-time statistics of the 'current'.
	 */
	update_curr(cfs_rq);

	update_stats_dequeue(cfs_rq, se);
	if (sleep) {
#ifdef CONFIG_SCHEDSTATS
		if (entity_is_task(se)) {
			struct task_struct *tsk = task_of(se);

			if (tsk->state & TASK_INTERRUPTIBLE)
				se->sleep_start = rq_of(cfs_rq)->clock;
			if (tsk->state & TASK_UNINTERRUPTIBLE)
				se->block_start = rq_of(cfs_rq)->clock;
		}
#endif
	}

	clear_buddies(cfs_rq, se);

	if (se != cfs_rq->curr)
		__dequeue_entity(cfs_rq, se);
	account_entity_dequeue(cfs_rq, se);
	update_min_vruntime(cfs_rq);
}

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void
check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
	unsigned long ideal_runtime, delta_exec;

	ideal_runtime = sched_slice(cfs_rq, curr);
	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
	if (delta_exec > ideal_runtime)
		resched_task(rq_of(cfs_rq)->curr);
}
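
/*
 * Example of the check above (illustrative figures): with two nice-0
 * tasks sharing a 20ms period, ideal_runtime is 10ms. Once the running
 * task has accumulated more than 10ms since it was last picked
 * (sum_exec_runtime - prev_sum_exec_runtime), the next scheduler tick
 * requests a reschedule so the other task can take its turn.
 */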

static void
set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/* 'current' is not kept within the tree. */
	if (se->on_rq) {
		/*
		 * Any task has to be enqueued before it get to execute on
		 * a CPU. So account for the time it spent waiting on the
		 * runqueue.
		 */
		update_stats_wait_end(cfs_rq, se);
		__dequeue_entity(cfs_rq, se);
	}

	update_stats_curr_start(cfs_rq, se);
	cfs_rq->curr = se;
#ifdef CONFIG_SCHEDSTATS
	/*
	 * Track our maximum slice length, if the CPU's load is at
	 * least twice that of our own weight (i.e. dont track it
	 * when there are only lesser-weight tasks around):
	 */
	if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
		se->slice_max = max(se->slice_max,
			se->sum_exec_runtime - se->prev_sum_exec_runtime);
	}
#endif
	se->prev_sum_exec_runtime = se->sum_exec_runtime;
}

static int
wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);

static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
{
	struct sched_entity *se = __pick_next_entity(cfs_rq);

	/*
	 * Prefer the 'next' buddy (the task we just woke), unless
	 * picking it would be unfair to the leftmost entity:
	 */
	if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, se) < 1)
		return cfs_rq->next;

	/*
	 * Otherwise prefer the 'last' buddy (the task we preempted),
	 * which is likely still cache hot:
	 */
	if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, se) < 1)
		return cfs_rq->last;

	return se;
}

static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
{
	/*
	 * If still on the runqueue then deactivate_task()
	 * was not called and update_curr() has to be done:
	 */
	if (prev->on_rq)
		update_curr(cfs_rq);

	check_spread(cfs_rq, prev);
	if (prev->on_rq) {
		update_stats_wait_start(cfs_rq, prev);
		/* Put 'current' back into the tree. */
		__enqueue_entity(cfs_rq, prev);
	}
	cfs_rq->curr = NULL;
}

static void
entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
{
	/*
	 * Update run-time statistics of the 'current'.
	 */
	update_curr(cfs_rq);

#ifdef CONFIG_SCHED_HRTICK
	/*
	 * queued ticks are scheduled to match the slice, so don't bother
	 * validating it and just reschedule.
	 */
	if (queued) {
		resched_task(rq_of(cfs_rq)->curr);
		return;
	}
	/*
	 * don't let the period tick interfere with the hrtick preemption
	 */
	if (!sched_feat(DOUBLE_TICK) &&
			hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
		return;
#endif

	if (cfs_rq->nr_running > 1 || !sched_feat(WAKEUP_PREEMPT))
		check_preempt_tick(cfs_rq, curr);
}

/**************************************************
 * CFS operations on tasks:
 */

#ifdef CONFIG_SCHED_HRTICK
static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
{
	struct sched_entity *se = &p->se;
	struct cfs_rq *cfs_rq = cfs_rq_of(se);

	WARN_ON(task_rq(p) != rq);

	if (hrtick_enabled(rq) && cfs_rq->nr_running > 1) {
		u64 slice = sched_slice(cfs_rq, se);
		u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
		s64 delta = slice - ran;

		if (delta < 0) {
			if (rq->curr == p)
				resched_task(p);
			return;
		}

		/*
		 * Don't schedule slices shorter than 10000ns, that just
		 * doesn't make sense. Rely on vruntime for fairness.
		 */
		if (rq->curr != p)
			delta = max_t(s64, 10000LL, delta);

		hrtick_start(rq, delta);
	}
}

/*
 * called from enqueue/dequeue and updates the hrtick when the
 * current task is from our class and nr_running is low enough
 * to matter.
 */
static void hrtick_update(struct rq *rq)
{
	struct task_struct *curr = rq->curr;

	if (curr->sched_class != &fair_sched_class)
		return;

	if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
		hrtick_start_fair(rq, curr);
}
#else /* !CONFIG_SCHED_HRTICK */
static inline void
hrtick_start_fair(struct rq *rq, struct task_struct *p)
{
}

static inline void hrtick_update(struct rq *rq)
{
}
#endif

/*
 * The enqueue_task method is called before nr_running is
 * increased. Here we update the fair scheduling stats and
 * then put the task into the rbtree:
 */
static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &p->se;

	for_each_sched_entity(se) {
		if (se->on_rq)
			break;
		cfs_rq = cfs_rq_of(se);
		enqueue_entity(cfs_rq, se, wakeup);
		wakeup = 1;
	}

	hrtick_update(rq);
}

/*
 * The dequeue_task method is called before nr_running is
 * decreased. We remove the task from the rbtree and
 * update the fair scheduling stats:
 */
static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &p->se;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		dequeue_entity(cfs_rq, se, sleep);
		/* Don't dequeue parent if it has other entities besides us */
		if (cfs_rq->load.weight)
			break;
		sleep = 1;
	}

	hrtick_update(rq);
}

/*
 * sched_yield() support is very simple - we dequeue and enqueue.
 *
 * If compat_yield is turned on then we requeue to the end of the tree.
 */
static void yield_task_fair(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
	struct sched_entity *rightmost, *se = &curr->se;

	/*
	 * Are we the only task in the tree?
	 */
	if (unlikely(cfs_rq->nr_running == 1))
		return;

	clear_buddies(cfs_rq, se);

	if (likely(!sysctl_sched_compat_yield) && curr->policy != SCHED_BATCH) {
		update_rq_clock(rq);
		/*
		 * Update run-time statistics of the 'current'.
		 */
		update_curr(cfs_rq);

		return;
	}
	/*
	 * Find the rightmost entry in the rbtree:
	 */
	rightmost = __pick_last_entity(cfs_rq);
	/*
	 * Already in the rightmost position?
	 */
	if (unlikely(!rightmost || rightmost->vruntime < se->vruntime))
		return;

	/*
	 * Minimally necessary key value to be last in the tree:
	 * Upon rescheduling, sched_class::put_prev_task() will place
	 * 'current' within the tree based on its new key value.
	 */
	se->vruntime = rightmost->vruntime + 1;
}
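
/*
 * A note on the two yield flavours (a reading aid): the default path
 * merely refreshes the runtime statistics, so the task usually keeps
 * running until its slice is used up; the compat path bumps the task's
 * vruntime just past the current rightmost entity, which pushes it to
 * the far end of the timeline and lets every other runnable task go
 * first - the aggressive behaviour of the old scheduler.
 */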

/*
 * wake_idle() will wake a task on an idle cpu if task->cpu is
 * not idle and an idle cpu is available. The span of cpus to
 * search starts with cpus closest then expands outwards.
 *
 * Returns the CPU we should wake onto.
 */
#if defined(ARCH_HAS_SCHED_WAKE_IDLE)
static int wake_idle(int cpu, struct task_struct *p)
{
	cpumask_t tmp;
	struct sched_domain *sd;
	int i;

	/*
	 * If this cpu is idle, or already running more than one
	 * task, waking here is the best we can do:
	 */
	if (idle_cpu(cpu) || cpu_rq(cpu)->cfs.nr_running > 1)
		return cpu;

	for_each_domain(cpu, sd) {
		if ((sd->flags & SD_WAKE_IDLE)
		    || ((sd->flags & SD_WAKE_IDLE_FAR)
			&& !task_hot(p, task_rq(p)->clock, sd))) {
			cpus_and(tmp, sd->span, p->cpus_allowed);
			cpus_and(tmp, tmp, cpu_active_map);
			for_each_cpu_mask_nr(i, tmp) {
				if (idle_cpu(i)) {
					if (i != task_cpu(p)) {
						schedstat_inc(p,
						       se.nr_wakeups_idle);
					}
					return i;
				}
			}
		} else {
			break;
		}
	}
	return cpu;
}
#else /* !ARCH_HAS_SCHED_WAKE_IDLE */
static inline int wake_idle(int cpu, struct task_struct *p)
{
	return cpu;
}
#endif

#ifdef CONFIG_SMP

#ifdef CONFIG_FAIR_GROUP_SCHED
/*
 * effective_load() calculates the load change as seen from the root
 * task group when task weight @wl is added to the group's cfs_rq on
 * @cpu while @wg is added to the group's total runqueue weight.
 *
 * Adding load to a group does not make the root heavier by that full
 * amount: a group's total shares are fixed, so extra weight on one cpu
 * mostly shifts how those shares are spread between cpus. The shift has
 * to be propagated level by level up the hierarchy.
 */
static long effective_load(struct task_group *tg, int cpu,
		long wl, long wg)
{
	struct sched_entity *se = tg->se[cpu];

	if (!tg->parent)
		return wl;

	/*
	 * With ASYM_EFF_LOAD, don't bother walking the hierarchy when
	 * no task weight is being added (wl == 0):
	 */
	if (!wl && sched_feat(ASYM_EFF_LOAD))
		return wl;

	for_each_sched_entity(se) {
		long S, rw, s, a, b;
		long more_w;

		/*
		 * Instead of using this increment, also add the difference
		 * between when the shares were last updated and now.
		 */
		more_w = se->my_q->load.weight - se->my_q->rq_weight;
		wl += more_w;
		wg += more_w;

		S = se->my_q->tg->shares;
		s = se->my_q->shares;
		rw = se->my_q->rq_weight;

		a = S*(rw + wl);
		b = S*rw + s*wg;

		wl = s*(a-b);

		if (likely(b))
			wl /= b;

		/*
		 * Moving shares between cpus does not alter the total
		 * group weight seen by the parent; only the shift in
		 * this level's shares (now in @wl) propagates upwards,
		 * so zero @wg for the remaining levels.
		 */
		wg = 0;
	}

	return wl;
}
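
/*
 * Sketch of the algebra in the loop above (a reading aid, under the
 * approximation that shares are distributed proportionally to runqueue
 * weight): ideally s = S * rw / rw_total, hence rw_total = S * rw / s.
 * After adding wl to this runqueue and wg to the total, the new share
 * would be
 *
 *	s' = S * (rw + wl) / (rw_total + wg)
 *	   = S * (rw + wl) * s / (S*rw + s*wg)
 *	   = a * s / b,		with a = S*(rw + wl), b = S*rw + s*wg
 *
 * so the change in this cpu's share is s' - s = s * (a - b) / b, which
 * is exactly the wl computed per iteration.
 */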

#else /* !CONFIG_FAIR_GROUP_SCHED */

static inline unsigned long effective_load(struct task_group *tg, int cpu,
		unsigned long wl, unsigned long wg)
{
	return wl;
}

#endif

static int
wake_affine(struct sched_domain *this_sd, struct rq *this_rq,
	    struct task_struct *p, int prev_cpu, int this_cpu, int sync,
	    int idx, unsigned long load, unsigned long this_load,
	    unsigned int imbalance)
{
	struct task_struct *curr = this_rq->curr;
	struct task_group *tg;
	unsigned long tl = this_load;
	unsigned long tl_per_task;
	unsigned long weight;
	int balanced;

	if (!(this_sd->flags & SD_WAKE_AFFINE) || !sched_feat(AFFINE_WAKEUPS))
		return 0;

	if (sync && (curr->se.avg_overlap > sysctl_sched_migration_cost ||
			p->se.avg_overlap > sysctl_sched_migration_cost))
		sync = 0;

	/*
	 * If sync wakeup then subtract the (maximum possible)
	 * effect of the currently running task from the load
	 * of the current CPU:
	 */
	if (sync) {
		tg = task_group(current);
		weight = current->se.load.weight;

		tl += effective_load(tg, this_cpu, -weight, -weight);
		load += effective_load(tg, prev_cpu, 0, -weight);
	}

	tg = task_group(p);
	weight = p->se.load.weight;

	balanced = 100*(tl + effective_load(tg, this_cpu, weight, weight)) <=
		imbalance*(load + effective_load(tg, prev_cpu, 0, weight));

	/*
	 * If the currently running task will sleep within
	 * a reasonable amount of time then attract this newly
	 * woken task:
	 */
	if (sync && balanced)
		return 1;

	schedstat_inc(p, se.nr_wakeups_affine_attempts);
	tl_per_task = cpu_avg_load_per_task(this_cpu);

	if (balanced || (tl <= load && tl + target_load(prev_cpu, idx) <=
			tl_per_task)) {
		/*
		 * This domain has SD_WAKE_AFFINE and
		 * p is cache cold in this domain, and
		 * there is no bad imbalance.
		 */
		schedstat_inc(this_sd, ttwu_move_affine);
		schedstat_inc(p, se.nr_wakeups_affine);

		return 1;
	}
	return 0;
}

static int select_task_rq_fair(struct task_struct *p, int sync)
{
	struct sched_domain *sd, *this_sd = NULL;
	int prev_cpu, this_cpu, new_cpu;
	unsigned long load, this_load;
	struct rq *this_rq;
	unsigned int imbalance;
	int idx;

	prev_cpu	= task_cpu(p);
	this_cpu	= smp_processor_id();
	this_rq		= cpu_rq(this_cpu);
	new_cpu		= prev_cpu;

	if (prev_cpu == this_cpu)
		goto out;
	/*
	 * 'this_sd' is the first domain that both
	 * this_cpu and prev_cpu are present in:
	 */
	for_each_domain(this_cpu, sd) {
		if (cpu_isset(prev_cpu, sd->span)) {
			this_sd = sd;
			break;
		}
	}

	if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed)))
		goto out;

	/*
	 * Check for affine wakeup and passive balancing possibilities.
	 */
	if (!this_sd)
		goto out;

	idx = this_sd->wake_idx;

	imbalance = 100 + (this_sd->imbalance_pct - 100) / 2;

	load = source_load(prev_cpu, idx);
	this_load = target_load(this_cpu, idx);

	if (wake_affine(this_sd, this_rq, p, prev_cpu, this_cpu, sync, idx,
				     load, this_load, imbalance))
		return this_cpu;

	/*
	 * Start passive balancing when half the imbalance_pct
	 * applies.
	 */
	if (this_sd->flags & SD_WAKE_BALANCE) {
		if (imbalance*this_load <= 100*load) {
			schedstat_inc(this_sd, ttwu_move_balance);
			schedstat_inc(p, se.nr_wakeups_passive);
			return this_cpu;
		}
	}

out:
	return wake_idle(new_cpu, p);
}
#endif /* CONFIG_SMP */

static unsigned long wakeup_gran(struct sched_entity *se)
{
	unsigned long gran = sysctl_sched_wakeup_granularity;

	/*
	 * More easily preempt - nice tasks, while not making it harder for
	 * + nice tasks.
	 */
	if (!sched_feat(ASYM_GRAN) || se->load.weight > NICE_0_LOAD)
		gran = calc_delta_fair(sysctl_sched_wakeup_granularity, se);

	return gran;
}
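
/*
 * Unit note (illustrative, assuming NICE_0_LOAD == 1024): the result is
 * compared against a vruntime difference, so the scaling converts the
 * 5ms wall-clock granularity into the running entity's virtual time.
 * For a weight-2048 current task, 5ms of wall time is only ~2.5ms of
 * its vruntime, so the effective granularity shrinks accordingly.
 */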

/*
 * Should 'se' preempt 'curr'.
 *
 *             |s1
 *        |s2
 *   |s3
 *         g
 *      |<--->|c
 *
 *  w(c, s1) = -1
 *  w(c, s2) =  0
 *  w(c, s3) =  1
 *
 */
static int
wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
{
	s64 gran, vdiff = curr->vruntime - se->vruntime;

	if (vdiff <= 0)
		return -1;

	gran = wakeup_gran(curr);
	if (vdiff > gran)
		return 1;

	return 0;
}

static void set_last_buddy(struct sched_entity *se)
{
	for_each_sched_entity(se)
		cfs_rq_of(se)->last = se;
}

static void set_next_buddy(struct sched_entity *se)
{
	for_each_sched_entity(se)
		cfs_rq_of(se)->next = se;
}

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
{
	struct task_struct *curr = rq->curr;
	struct sched_entity *se = &curr->se, *pse = &p->se;

	if (unlikely(rt_prio(p->prio))) {
		struct cfs_rq *cfs_rq = task_cfs_rq(curr);

		update_rq_clock(rq);
		update_curr(cfs_rq);
		resched_task(curr);
		return;
	}

	if (unlikely(p->sched_class != &fair_sched_class))
		return;

	if (unlikely(se == pse))
		return;

	/*
	 * Only set the backward buddy when the current task is still on the
	 * rq. This can happen when a wakeup gets interleaved with schedule on
	 * the ->pre_schedule() or idle_balance() point, either of which can
	 * drop the rq lock.
	 *
	 * Also, during early boot the idle thread is in the fair class, for
	 * obvious reasons its a bad idea to schedule back to the idle thread.
	 */
	if (sched_feat(LAST_BUDDY) && likely(se->on_rq && curr != rq->idle))
		set_last_buddy(se);
	set_next_buddy(pse);

	/*
	 * We can come here with TIF_NEED_RESCHED already set from new task
	 * wake up path.
	 */
	if (test_tsk_need_resched(curr))
		return;

	/*
	 * Batch tasks do not preempt (their preemption is driven by
	 * the tick):
	 */
	if (unlikely(p->policy == SCHED_BATCH))
		return;

	if (!sched_feat(WAKEUP_PREEMPT))
		return;

	if (sched_feat(WAKEUP_OVERLAP) && (sync ||
			(se->avg_overlap < sysctl_sched_migration_cost &&
			 pse->avg_overlap < sysctl_sched_migration_cost))) {
		resched_task(curr);
		return;
	}

	find_matching_se(&se, &pse);

	while (se) {
		BUG_ON(!pse);

		if (wakeup_preempt_entity(se, pse) == 1) {
			resched_task(curr);
			break;
		}

		se = parent_entity(se);
		pse = parent_entity(pse);
	}
}

static struct task_struct *pick_next_task_fair(struct rq *rq)
{
	struct task_struct *p;
	struct cfs_rq *cfs_rq = &rq->cfs;
	struct sched_entity *se;

	if (unlikely(!cfs_rq->nr_running))
		return NULL;

	do {
		se = pick_next_entity(cfs_rq);
		set_next_entity(cfs_rq, se);
		cfs_rq = group_cfs_rq(se);
	} while (cfs_rq);

	p = task_of(se);
	hrtick_start_fair(rq, p);

	return p;
}

/*
 * Account for a descheduled task:
 */
static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
{
	struct sched_entity *se = &prev->se;
	struct cfs_rq *cfs_rq;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		put_prev_entity(cfs_rq, se);
	}
}

#ifdef CONFIG_SMP
/**************************************************
 * Fair scheduling class load-balancing methods:
 */

/*
 * Load-balancing iterator. Note: while the runqueue stays locked
 * during the whole iteration, the current task might be
 * dequeued so the iterator has to be dequeue-safe. Here we
 * achieve that by always pre-iterating before returning
 * the current task:
 */
static struct task_struct *
__load_balance_iterator(struct cfs_rq *cfs_rq, struct list_head *next)
{
	struct task_struct *p = NULL;
	struct sched_entity *se;

	if (next == &cfs_rq->tasks)
		return NULL;

	se = list_entry(next, struct sched_entity, group_node);
	p = task_of(se);
	cfs_rq->balance_iterator = next->next;

	return p;
}

static struct task_struct *load_balance_start_fair(void *arg)
{
	struct cfs_rq *cfs_rq = arg;

	return __load_balance_iterator(cfs_rq, cfs_rq->tasks.next);
}

static struct task_struct *load_balance_next_fair(void *arg)
{
	struct cfs_rq *cfs_rq = arg;

	return __load_balance_iterator(cfs_rq, cfs_rq->balance_iterator);
}

static unsigned long
__load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
		unsigned long max_load_move, struct sched_domain *sd,
		enum cpu_idle_type idle, int *all_pinned, int *this_best_prio,
		struct cfs_rq *cfs_rq)
{
	struct rq_iterator cfs_rq_iterator;

	cfs_rq_iterator.start = load_balance_start_fair;
	cfs_rq_iterator.next = load_balance_next_fair;
	cfs_rq_iterator.arg = cfs_rq;

	return balance_tasks(this_rq, this_cpu, busiest,
			max_load_move, sd, idle, all_pinned,
			this_best_prio, &cfs_rq_iterator);
}

#ifdef CONFIG_FAIR_GROUP_SCHED
static unsigned long
load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
		  unsigned long max_load_move,
		  struct sched_domain *sd, enum cpu_idle_type idle,
		  int *all_pinned, int *this_best_prio)
{
	long rem_load_move = max_load_move;
	int busiest_cpu = cpu_of(busiest);
	struct task_group *tg;

	rcu_read_lock();
	update_h_load(busiest_cpu);

	list_for_each_entry_rcu(tg, &task_groups, list) {
		struct cfs_rq *busiest_cfs_rq = tg->cfs_rq[busiest_cpu];
		unsigned long busiest_h_load = busiest_cfs_rq->h_load;
		unsigned long busiest_weight = busiest_cfs_rq->load.weight;
		u64 rem_load, moved_load;

		/*
		 * empty group
		 */
		if (!busiest_cfs_rq->task_weight)
			continue;

		rem_load = (u64)rem_load_move * busiest_weight;
		rem_load = div_u64(rem_load, busiest_h_load + 1);

		moved_load = __load_balance_fair(this_rq, this_cpu, busiest,
				rem_load, sd, idle, all_pinned, this_best_prio,
				tg->cfs_rq[busiest_cpu]);

		if (!moved_load)
			continue;

		moved_load *= busiest_h_load;
		moved_load = div_u64(moved_load, busiest_weight + 1);

		rem_load_move -= moved_load;
		if (rem_load_move < 0)
			break;
	}
	rcu_read_unlock();

	return max_load_move - rem_load_move;
}
#else
static unsigned long
load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
		  unsigned long max_load_move,
		  struct sched_domain *sd, enum cpu_idle_type idle,
		  int *all_pinned, int *this_best_prio)
{
	return __load_balance_fair(this_rq, this_cpu, busiest,
			max_load_move, sd, idle, all_pinned,
			this_best_prio, &busiest->cfs);
}
#endif

static int
move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
		   struct sched_domain *sd, enum cpu_idle_type idle)
{
	struct cfs_rq *busy_cfs_rq;
	struct rq_iterator cfs_rq_iterator;

	cfs_rq_iterator.start = load_balance_start_fair;
	cfs_rq_iterator.next = load_balance_next_fair;

	for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
		/*
		 * pass busy_cfs_rq argument into
		 * load_balance_[start|next]_fair iterators
		 */
		cfs_rq_iterator.arg = busy_cfs_rq;
		if (iter_move_one_task(this_rq, this_cpu, busiest, sd, idle,
				       &cfs_rq_iterator))
			return 1;
	}

	return 0;
}
#endif /* CONFIG_SMP */

/*
 * scheduler tick hitting a task of our scheduling class:
 */
static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &curr->se;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		entity_tick(cfs_rq, se, queued);
	}
}

#define swap(a, b) do { typeof(a) tmp = (a); (a) = (b); (b) = tmp; } while (0)

/*
 * Share the fairness runtime between parent and child, thus the
 * total amount of pressure for CPU stays equal - new tasks
 * get a chance to run but frequent forkers are not allowed
 * to monopolize the CPU. Note: the parent runqueue is locked,
 * the child is not running yet.
 */
static void task_new_fair(struct rq *rq, struct task_struct *p)
{
	struct cfs_rq *cfs_rq = task_cfs_rq(p);
	struct sched_entity *se = &p->se, *curr = cfs_rq->curr;
	int this_cpu = smp_processor_id();

	sched_info_queued(p);

	update_curr(cfs_rq);
	place_entity(cfs_rq, se, 1);

	/* 'curr' will be NULL if the child belongs to a different group */
	if (sysctl_sched_child_runs_first && this_cpu == task_cpu(p) &&
			curr && curr->vruntime < se->vruntime) {
		/*
		 * Upon rescheduling, sched_class::put_prev_task() will place
		 * 'current' within the tree based on its new key value.
		 */
		swap(curr->vruntime, se->vruntime);
		resched_task(rq->curr);
	}

	enqueue_task_fair(rq, p, 0);
}

/*
 * Priority of the task has changed. Check to see if we preempt
 * the current task.
 */
static void prio_changed_fair(struct rq *rq, struct task_struct *p,
			      int oldprio, int running)
{
	/*
	 * Reschedule if we are currently running on this runqueue and
	 * our priority decreased, or if we are not currently running on
	 * this runqueue and our priority is higher than the current's
	 */
	if (running) {
		if (p->prio > oldprio)
			resched_task(rq->curr);
	} else
		check_preempt_curr(rq, p, 0);
}

/*
 * We switched to the sched_class, but there may be a need to preempt
 * the current task.
 */
static void switched_to_fair(struct rq *rq, struct task_struct *p,
			     int running)
{
	/*
	 * We were most likely switched from sched_rt, so
	 * kick off the schedule if running, otherwise just see
	 * if we can still preempt the current task.
	 */
	if (running)
		resched_task(rq->curr);
	else
		check_preempt_curr(rq, p, 0);
}

/*
 * Account for a task changing its policy or group.
 *
 * This routine is mostly called to set cfs_rq->curr field when a task
 * migrates between groups/classes.
 */
static void set_curr_task_fair(struct rq *rq)
{
	struct sched_entity *se = &rq->curr->se;

	for_each_sched_entity(se)
		set_next_entity(cfs_rq_of(se), se);
}

#ifdef CONFIG_FAIR_GROUP_SCHED
static void moved_group_fair(struct task_struct *p)
{
	struct cfs_rq *cfs_rq = task_cfs_rq(p);

	update_curr(cfs_rq);
	place_entity(cfs_rq, &p->se, 1);
}
#endif

/*
 * All the scheduling class methods:
 */
static const struct sched_class fair_sched_class = {
	.next			= &idle_sched_class,
	.enqueue_task		= enqueue_task_fair,
	.dequeue_task		= dequeue_task_fair,
	.yield_task		= yield_task_fair,

	.check_preempt_curr	= check_preempt_wakeup,

	.pick_next_task		= pick_next_task_fair,
	.put_prev_task		= put_prev_task_fair,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_fair,

	.load_balance		= load_balance_fair,
	.move_one_task		= move_one_task_fair,
#endif

	.set_curr_task		= set_curr_task_fair,
	.task_tick		= task_tick_fair,
	.task_new		= task_new_fair,

	.prio_changed		= prio_changed_fair,
	.switched_to		= switched_to_fair,

#ifdef CONFIG_FAIR_GROUP_SCHED
	.moved_group		= moved_group_fair,
#endif
};

#ifdef CONFIG_SCHED_DEBUG
static void print_cfs_stats(struct seq_file *m, int cpu)
{
	struct cfs_rq *cfs_rq;

	rcu_read_lock();
	for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
		print_cfs_rq(m, cpu, cfs_rq);
	rcu_read_unlock();
}
#endif