
#ifdef CONFIG_SCHEDSTATS
/*
 * Bump this up when changing the output format or the meaning of an
 * existing format, so that tools can adapt (or abort).
 */
#define SCHEDSTAT_VERSION 14

static int show_schedstat(struct seq_file *seq, void *v)
{
	int cpu;
	int mask_len = DIV_ROUND_UP(NR_CPUS, 32) * 9;
	char *mask_str = kmalloc(mask_len, GFP_KERNEL);

	if (mask_str == NULL)
		return -ENOMEM;

	seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
	seq_printf(seq, "timestamp %lu\n", jiffies);
	for_each_online_cpu(cpu) {
		struct rq *rq = cpu_rq(cpu);
#ifdef CONFIG_SMP
		struct sched_domain *sd;
		int dcount = 0;
#endif

		/* runqueue-specific stats */
		seq_printf(seq,
		    "cpu%d %u %u %u %u %u %u %u %u %u %llu %llu %lu",
		    cpu, rq->yld_both_empty,
		    rq->yld_act_empty, rq->yld_exp_empty, rq->yld_count,
		    rq->sched_switch, rq->sched_count, rq->sched_goidle,
		    rq->ttwu_count, rq->ttwu_local,
		    rq->rq_sched_info.cpu_time,
		    rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount);

		seq_printf(seq, "\n");

#ifdef CONFIG_SMP
		/* domain-specific stats */
		preempt_disable();
		for_each_domain(cpu, sd) {
			enum cpu_idle_type itype;

			cpumask_scnprintf(mask_str, mask_len, sd->span);
			seq_printf(seq, "domain%d %s", dcount++, mask_str);
			for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
					itype++) {
				seq_printf(seq, " %u %u %u %u %u %u %u %u",
				    sd->lb_count[itype],
				    sd->lb_balanced[itype],
				    sd->lb_failed[itype],
				    sd->lb_imbalance[itype],
				    sd->lb_gained[itype],
				    sd->lb_hot_gained[itype],
				    sd->lb_nobusyq[itype],
				    sd->lb_nobusyg[itype]);
			}
			seq_printf(seq,
			    " %u %u %u %u %u %u %u %u %u %u %u %u\n",
			    sd->alb_count, sd->alb_failed, sd->alb_pushed,
			    sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
			    sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
			    sd->ttwu_wake_remote, sd->ttwu_move_affine,
			    sd->ttwu_move_balance);
		}
		preempt_enable();
#endif
	}
	kfree(mask_str);
	return 0;
}
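
/*
 * Illustrative sketch of what reading this file yields (the numbers are
 * invented; the field order simply follows the seq_printf() calls above):
 *
 *	$ cat /proc/schedstat
 *	version 14
 *	timestamp 4294937268
 *	cpu0 0 0 0 12 34 56 7 89 10 123456789 987654321 42
 *	domain0 00000003 100 98 1 16 2 0 0 0 ...
 *
 * Each domain line carries one 8-field lb_* block per cpu_idle_type,
 * followed by the alb_*, sbe_*, sbf_* and ttwu_* counters.
 */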

static int schedstat_open(struct inode *inode, struct file *file)
{
	/* one page of seq_file buffer, plus another page per 32 cpus */
	unsigned int size = PAGE_SIZE * (1 + num_online_cpus() / 32);
	char *buf = kmalloc(size, GFP_KERNEL);
	struct seq_file *m;
	int res;

	if (!buf)
		return -ENOMEM;
	res = single_open(file, show_schedstat, NULL);
	if (!res) {
		m = file->private_data;
		m->buf = buf;
		m->size = size;
	} else
		kfree(buf);
	return res;
}

static const struct file_operations proc_schedstat_operations = {
	.open    = schedstat_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

static int __init proc_schedstat_init(void)
{
	proc_create("schedstat", 0, NULL, &proc_schedstat_operations);
	return 0;
}
module_init(proc_schedstat_init);

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
	if (rq) {
		rq->rq_sched_info.run_delay += delta;
		rq->rq_sched_info.pcount++;
	}
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_sched_info.cpu_time += delta;
}

static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_sched_info.run_delay += delta;
}
# define schedstat_inc(rq, field)	do { (rq)->field++; } while (0)
# define schedstat_add(rq, field, amt)	do { (rq)->field += (amt); } while (0)
# define schedstat_set(var, val)	do { var = (val); } while (0)
#else /* !CONFIG_SCHEDSTATS */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{}
# define schedstat_inc(rq, field)	do { } while (0)
# define schedstat_add(rq, field, amt)	do { } while (0)
# define schedstat_set(var, val)	do { } while (0)
#endif /* CONFIG_SCHEDSTATS */
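
/*
 * Usage sketch for the schedstat helpers (illustrative; the real call
 * sites live in the scheduler proper, e.g. kernel/sched.c — the field
 * and variable names below are stand-ins):
 *
 *	schedstat_inc(rq, ttwu_count);
 *	schedstat_add(sd, lb_imbalance[idle], imbalance);
 *	schedstat_set(se.wait_max, max(se.wait_max, delta));
 *
 * With CONFIG_SCHEDSTATS unset, all three expand to empty statements,
 * so the counters cost nothing on production configurations.
 */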

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
static inline void sched_info_reset_dequeued(struct task_struct *t)
{
	t->sched_info.last_queued = 0;
}

/*
 * Called when a task is dequeued from the runqueue without being given
 * the cpu.  We are interested in knowing how long it was from the
 * *first* time a task was queued to the time that it finally hit a
 * cpu, so any pending wait is folded into run_delay here and
 * last_queued is reset.
 */
static inline void sched_info_dequeued(struct task_struct *t)
{
	unsigned long long now = task_rq(t)->clock, delta = 0;

	if (unlikely(sched_info_on()))
		if (t->sched_info.last_queued)
			delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;

	rq_sched_info_dequeued(task_rq(t), delta);
}

/*
 * Called when a task finally hits the cpu.  We can now calculate how
 * long it was waiting to run.  We also note when it began running so
 * that we can keep stats on how long it held the cpu.
 */
static void sched_info_arrive(struct task_struct *t)
{
	unsigned long long now = task_rq(t)->clock, delta = 0;

	if (t->sched_info.last_queued)
		delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;
	t->sched_info.last_arrival = now;
	t->sched_info.pcount++;

	rq_sched_info_arrive(task_rq(t), delta);
}

/*
 * This function is only called from enqueue_task(), but also only updates
 * the timestamp if it is already not set.  It's assumed that
 * sched_info_dequeued() will clear that stamp when appropriate.
 */
static inline void sched_info_queued(struct task_struct *t)
{
	if (unlikely(sched_info_on()))
		if (!t->sched_info.last_queued)
			t->sched_info.last_queued = task_rq(t)->clock;
}

/*
 * Called when a process ceases being the active-running process, either
 * voluntarily or involuntarily.  Now we can calculate how long we ran.
 * Also, if the process is still in the TASK_RUNNING state, call
 * sched_info_queued() to mark that it has now again started waiting on
 * the runqueue.
 */
static inline void sched_info_depart(struct task_struct *t)
{
	unsigned long long delta = task_rq(t)->clock -
					t->sched_info.last_arrival;

	t->sched_info.cpu_time += delta;
	rq_sched_info_depart(task_rq(t), delta);

	if (t->state == TASK_RUNNING)
		sched_info_queued(t);
}
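
/*
 * A worked example of the accounting above, with hypothetical rq->clock
 * values (nanoseconds):
 *
 *	t=100	task enqueued		sched_info_queued():  last_queued = 100
 *	t=150	task gets the cpu	sched_info_arrive():  run_delay += 50,
 *						      last_arrival = 150
 *	t=180	task switched out	sched_info_depart():  cpu_time += 30
 */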

/*
 * Called when tasks are switched involuntarily due, typically, to expiring
 * their time slice.  (This may also be called when switching to or from
 * the idle task.)  We are only called when prev != next.
 */
static inline void
__sched_info_switch(struct task_struct *prev, struct task_struct *next)
{
	struct rq *rq = task_rq(prev);

	/*
	 * prev now departs the cpu.  It's not interesting to record
	 * stats about how efficient we were at scheduling the idle
	 * process, so skip the accounting on either side of the switch
	 * when the task involved is rq->idle.
	 */
	if (prev != rq->idle)
		sched_info_depart(prev);

	if (next != rq->idle)
		sched_info_arrive(next);
}
static inline void
sched_info_switch(struct task_struct *prev, struct task_struct *next)
{
	if (unlikely(sched_info_on()))
		__sched_info_switch(prev, next);
}
#else /* !CONFIG_SCHEDSTATS && !CONFIG_TASK_DELAY_ACCT */
#define sched_info_queued(t)			do { } while (0)
#define sched_info_reset_dequeued(t)		do { } while (0)
#define sched_info_dequeued(t)			do { } while (0)
#define sched_info_switch(t, next)		do { } while (0)
#endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */

/*
 * The following are functions that support scheduler-internal time accounting.
 * These functions are generally called at the timer tick.  None of this depends
 * on CONFIG_SCHEDSTATS.
 */

/**
 * account_group_user_time - Maintain utime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @cputime:	Time value by which to increment the utime field of the
 *		thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the utime field there.
 */
static inline void account_group_user_time(struct task_struct *tsk,
					   cputime_t cputime)
{
	struct signal_struct *sig;

	/* tsk == current, ensure it is safe to use ->signal */
	if (unlikely(tsk->exit_state))
		return;

	sig = tsk->signal;
	if (sig->cputime.totals) {
		struct task_cputime *times;

		times = per_cpu_ptr(sig->cputime.totals, get_cpu());
		times->utime = cputime_add(times->utime, cputime);
		put_cpu_no_resched();
	}
}

/**
 * account_group_system_time - Maintain stime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @cputime:	Time value by which to increment the stime field of the
 *		thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the stime field there.
 */
static inline void account_group_system_time(struct task_struct *tsk,
					     cputime_t cputime)
{
	struct signal_struct *sig;

	/* tsk == current, ensure it is safe to use ->signal */
	if (unlikely(tsk->exit_state))
		return;

	sig = tsk->signal;
	if (sig->cputime.totals) {
		struct task_cputime *times;

		times = per_cpu_ptr(sig->cputime.totals, get_cpu());
		times->stime = cputime_add(times->stime, cputime);
		put_cpu_no_resched();
	}
}

/**
 * account_group_exec_runtime - Maintain exec runtime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @ns:		Time value by which to increment the sum_exec_runtime field
 *		of the thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the sum_exec_runtime field there.
 */
static inline void account_group_exec_runtime(struct task_struct *tsk,
					      unsigned long long ns)
{
	struct signal_struct *sig;

	sig = tsk->signal;
	/* see __exit_signal()->task_rq_unlock_wait() */
	barrier();
	if (unlikely(!sig))
		return;

	if (sig->cputime.totals) {
		struct task_cputime *times;

		times = per_cpu_ptr(sig->cputime.totals, get_cpu());
		times->sum_exec_runtime += ns;
		put_cpu_no_resched();
	}
}
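
/*
 * Call-site sketch (illustrative): the account_group_*() helpers are
 * invoked from the tick and scheduler accounting paths, roughly:
 *
 *	account_user_time()   -> account_group_user_time(p, cputime)
 *	account_system_time() -> account_group_system_time(p, cputime)
 *	update_curr()         -> account_group_exec_runtime(curr, delta_exec)
 *
 * They maintain the per-cpu thread-group totals that the POSIX CPU
 * timer code sums when it needs group cputime.
 */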