/*
 * processor_idle - idle state submodule to the ACPI processor driver
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>	/* need_resched() */
#include <linux/pm_qos_params.h>
#include <linux/clockchips.h>
#include <linux/cpuidle.h>

/*
 * Include the apic definitions for x86 to have the APIC timer related defines
 * available also for UP (on SMP it gets magically included via linux/smp.h).
 * asm/acpi.h is not an option, as it would require more include magic. Also
 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
 */
#ifdef CONFIG_X86
#include <asm/apic.h>
#endif

#include <asm/io.h>
#include <asm/uaccess.h>

#include <acpi/acpi_bus.h>
#include <acpi/processor.h>
#include <asm/processor.h>

#define ACPI_PROCESSOR_CLASS		"processor"
#define _COMPONENT			ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_idle");
#define ACPI_PROCESSOR_FILE_POWER	"power"
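
/*
 * The ACPI PM timer runs at PM_TIMER_FREQUENCY (3.579545 MHz).  The
 * helpers below convert between microseconds, nanoseconds and PM timer
 * ticks for idle-time accounting.
 */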
#define US_TO_PM_TIMER_TICKS(t)		((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
#define PM_TIMER_TICK_NS		(1000000000ULL/PM_TIMER_FREQUENCY)
#ifndef CONFIG_CPU_IDLE
#define C2_OVERHEAD			4	/* 1us (3.579 ticks per us) */
#define C3_OVERHEAD			4	/* 1us (3.579 ticks per us) */
static void (*pm_idle_save) (void) __read_mostly;
#else
#define C2_OVERHEAD			1	/* 1us */
#define C3_OVERHEAD			1	/* 1us */
#endif
#define PM_TIMER_TICKS_TO_US(p)		(((p) * 1000)/(PM_TIMER_FREQUENCY/1000))

static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
#ifdef CONFIG_CPU_IDLE
module_param(max_cstate, uint, 0000);
#else
module_param(max_cstate, uint, 0644);
#endif
static unsigned int nocst __read_mostly;
module_param(nocst, uint, 0000);

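/*
 * Example: boot with "processor.max_cstate=1" to limit the driver to C1,
 * or "processor.nocst=1" to ignore _CST and use only FADT/P_BLK data.
 */
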
#ifndef CONFIG_CPU_IDLE
/*
 * bm_history -- bit-mask with a bit per jiffy of bus-master activity
 * 1000 HZ: 0xFFFFFFFF: 32 jiffies = 32ms
 *  800 HZ: 0xFFFFFFFF: 32 jiffies = 40ms
 *  100 HZ: 0x0000000F:  4 jiffies = 40ms
 * reduced history for more aggressive entry into C3
 */
static unsigned int bm_history __read_mostly =
    (HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1));
module_param(bm_history, uint, 0644);

static int acpi_processor_set_power_policy(struct acpi_processor *pr);

#else	/* CONFIG_CPU_IDLE */
static unsigned int latency_factor __read_mostly = 2;
module_param(latency_factor, uint, 0644);
#endif

/*
 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
 * For now disable this. Probably a bug somewhere else.
 *
 * To skip this limit, boot/load with a large max_cstate limit.
 */
static int set_max_cstate(const struct dmi_system_id *id)
{
	if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
		return 0;

	printk(KERN_NOTICE PREFIX "%s detected - limiting to C%ld max_cstate."
	       " Override with \"processor.max_cstate=%d\"\n", id->ident,
	       (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);

	max_cstate = (long)id->driver_data;

	return 0;
}

/* Actually this shouldn't be __cpuinitdata, would be better to fix the
   callers to only run once -AK */
static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET70WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET43WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET45WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET47WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET50WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET52WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET55WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET56WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET59WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET61WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET62WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET64WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET65WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET68WW") }, (void *)1},
	{ set_max_cstate, "Medion 41700", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION,"R01-A1J")}, (void *)1},
	{ set_max_cstate, "Clevo 5600D", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
	 (void *)2},
	{},
};

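/*
 * The ACPI PM timer is either 24 or 32 bits wide (ACPI_FADT_32BIT_TIMER);
 * the helpers below account for a single counter wraparound between two
 * reads when computing elapsed time.
 */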
static inline u32 ticks_elapsed(u32 t1, u32 t2)
{
	if (t2 >= t1)
		return (t2 - t1);
	else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
		return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
	else
		return ((0xFFFFFFFF - t1) + t2);
}

static inline u32 ticks_elapsed_in_us(u32 t1, u32 t2)
{
	if (t2 >= t1)
		return PM_TIMER_TICKS_TO_US(t2 - t1);
	else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
		return PM_TIMER_TICKS_TO_US(((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
	else
		return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2);
}

/*
 * Callers should disable interrupts before the call and enable
 * interrupts after return.
 */
static void acpi_safe_halt(void)
{
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we
	 * test NEED_RESCHED:
	 */
	smp_mb();
	if (!need_resched()) {
		safe_halt();
		local_irq_disable();
	}
	current_thread_info()->status |= TS_POLLING;
}

#ifndef CONFIG_CPU_IDLE

static void
acpi_processor_power_activate(struct acpi_processor *pr,
			      struct acpi_processor_cx *new)
{
	struct acpi_processor_cx *old;

	if (!pr || !new)
		return;

	old = pr->power.state;

	if (old)
		old->promotion.count = 0;
	new->demotion.count = 0;

	/* Cleanup from old state. */
	if (old) {
		switch (old->type) {
		case ACPI_STATE_C3:
			/* Disable bus master reload */
			if (new->type != ACPI_STATE_C3 && pr->flags.bm_check)
				acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
			break;
		}
	}

	/* Prepare to use new state. */
	switch (new->type) {
	case ACPI_STATE_C3:
		/* Enable bus master reload */
		if (old->type != ACPI_STATE_C3 && pr->flags.bm_check)
			acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
		break;
	}

	pr->power.state = new;

	return;
}

static atomic_t c3_cpu_count;

/* Common C-state entry for C2, C3, .. */
static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
{
	/* Don't trace irqs off for idle */
	stop_critical_timings();
	if (cstate->entry_method == ACPI_CSTATE_FFH) {
		/* Call into architectural FFH based C-state */
		acpi_processor_ffh_cstate_enter(cstate);
	} else {
		int unused;
		/* IO port protocol */
		inb(cstate->address);
		/* Dummy wait op - must do something useless after P_LVL2 read
		   because chipsets cannot guarantee that STPCLK# signal
		   gets asserted in time to freeze execution properly. */
		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
	}
	start_critical_timings();
}
#endif /* !CONFIG_CPU_IDLE */
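
/*
 * On many x86 CPUs the local APIC timer stops in deep C-states.  When
 * ARCH_APICTIMER_STOPS_ON_C3 is defined, track the shallowest state that
 * needs it and switch to the broadcast clockevent device around idle.
 */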
#ifdef ARCH_APICTIMER_STOPS_ON_C3

/*
 * Some BIOS implementations switch to C3 in the published C2 state.
 * This seems to be a common problem on AMD boxen, but other vendors
 * are affected too. We pick the most conservative approach: we assume
 * that the local APIC stops in both C2 and C3.
 */
static void acpi_timer_check_state(int state, struct acpi_processor *pr,
				   struct acpi_processor_cx *cx)
{
	struct acpi_processor_power *pwr = &pr->power;
	u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;

	/*
	 * Check, if one of the previous states already marked the lower
	 * cstate as requiring the broadcast timer
	 */
	if (pwr->timer_broadcast_on_state < state)
		return;

	if (cx->type >= type)
		pr->power.timer_broadcast_on_state = state;
}

static void acpi_propagate_timer_broadcast(struct acpi_processor *pr)
{
	unsigned long reason;

	reason = pr->power.timer_broadcast_on_state < INT_MAX ?
		CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;

	clockevents_notify(reason, &pr->id);
}

/* Power(C) State timer broadcast control */
static void acpi_state_timer_broadcast(struct acpi_processor *pr,
				       struct acpi_processor_cx *cx,
				       int broadcast)
{
	int state = cx - pr->power.states;

	if (state >= pr->power.timer_broadcast_on_state) {
		unsigned long reason;

		reason = broadcast ?  CLOCK_EVT_NOTIFY_BROADCAST_ENTER :
			CLOCK_EVT_NOTIFY_BROADCAST_EXIT;
		clockevents_notify(reason, &pr->id);
	}
}

#else

static void acpi_timer_check_state(int state, struct acpi_processor *pr,
				   struct acpi_processor_cx *cstate) { }
static void acpi_propagate_timer_broadcast(struct acpi_processor *pr) { }
static void acpi_state_timer_broadcast(struct acpi_processor *pr,
				       struct acpi_processor_cx *cx,
				       int broadcast)
{
}

#endif

/*
 * Suspend / resume control
 */
static int acpi_idle_suspend;

int acpi_processor_suspend(struct acpi_device * device, pm_message_t state)
{
	acpi_idle_suspend = 1;
	return 0;
}

int acpi_processor_resume(struct acpi_device * device)
{
	acpi_idle_suspend = 0;
	return 0;
}
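
/*
 * On x86 the TSC may stop counting in deep C-states; report this so the
 * timekeeping code can drop the TSC as a clocksource when needed.
 */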
#if defined(CONFIG_GENERIC_TIME) && defined(CONFIG_X86)
static int tsc_halts_in_c(int state)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		/*
		 * AMD Fam10h TSC will tick in all
		 * C/P/S0/S1 states when this bit is set.
		 */
		if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
			return 0;
		/*FALL THROUGH*/
	case X86_VENDOR_INTEL:
		/* Several cases known where TSC halts in C2 too */
	default:
		return state > ACPI_STATE_C1;
	}
}
#endif
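
/*
 * acpi_processor_idle - the legacy idle handler (!CONFIG_CPU_IDLE),
 * installed as pm_idle.  It picks a C-state, enters it with interrupts
 * disabled, then promotes or demotes the current state based on how long
 * the CPU actually slept and on observed bus-master activity.
 */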
#ifndef CONFIG_CPU_IDLE
static void acpi_processor_idle(void)
{
	struct acpi_processor *pr = NULL;
	struct acpi_processor_cx *cx = NULL;
	struct acpi_processor_cx *next_state = NULL;
	int sleep_ticks = 0;
	u32 t1, t2 = 0;

	/*
	 * Interrupts must be disabled during bus mastering calculations and
	 * during C2/C3 transitions.
	 */
	local_irq_disable();

	pr = __get_cpu_var(processors);
	if (!pr) {
		local_irq_enable();
		return;
	}

	/*
	 * Check whether we truly need to go idle, or should
	 * reschedule:
	 */
	if (unlikely(need_resched())) {
		local_irq_enable();
		return;
	}

	cx = pr->power.state;
	if (!cx || acpi_idle_suspend) {
		if (pm_idle_save) {
			pm_idle_save(); /* enables IRQs */
		} else {
			acpi_safe_halt();
			local_irq_enable();
		}

		return;
	}

	/*
	 * Check BM Activity
	 * -----------------
	 * Check for bus mastering activity (if required), record, and check
	 * for demotion.
	 */
	if (pr->flags.bm_check) {
		u32 bm_status = 0;
		unsigned long diff = jiffies - pr->power.bm_check_timestamp;

		if (diff > 31)
			diff = 31;

		pr->power.bm_activity <<= diff;

		acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
		if (bm_status) {
			pr->power.bm_activity |= 0x1;
			acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
		}
		/*
		 * PIIX4 Erratum #18: Note that BMIDEA bit of BMISX register
		 * doesn't show the actual bus mastering activity, forcing
		 * us to manually check the BMIDEA bit of each IDE channel.
		 */
		else if (errata.piix4.bmisx) {
			if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
			    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
				pr->power.bm_activity |= 0x1;
		}

		pr->power.bm_check_timestamp = jiffies;

		/*
		 * If bus mastering is or was active this jiffy, demote
		 * to avoid a faulty transition.  Note that the processor
		 * won't enter a low-power state during this call (to this
		 * function) but should upon the next.
		 *
		 * TBD: A better policy might be to fallback to the demotion
		 *      state (use it for this quantum only) instead of
		 *      demoting -- and rely on duration as our sole demotion
		 *      qualification.  This may, however, introduce DMA
		 *      issues (e.g. floppy DMA transfer overrun/underrun).
		 */
		if ((pr->power.bm_activity & 0x1) &&
		    cx->demotion.threshold.bm) {
			local_irq_enable();
			next_state = cx->demotion.state;
			goto end;
		}
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system. We do it here instead of doing it at _CST/P_LVL
	 * detection phase, to work cleanly with logical CPU hotplug.
	 */
	if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
	    !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		cx = &pr->power.states[ACPI_STATE_C1];
#endif

	/*
	 * Sleep:
	 * ------
	 * Invoke the current Cx state to put the processor to sleep.
	 */
	if (cx->type == ACPI_STATE_C2 || cx->type == ACPI_STATE_C3) {
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we
		 * test NEED_RESCHED:
		 */
		smp_mb();
		if (need_resched()) {
			current_thread_info()->status |= TS_POLLING;
			local_irq_enable();
			return;
		}
	}

	switch (cx->type) {

	case ACPI_STATE_C1:
		/*
		 * Invoke C1.
		 * Use the appropriate idle routine, the one that would
		 * be used without acpi C-states.
		 */
		if (pm_idle_save) {
			pm_idle_save(); /* enables IRQs */
		} else {
			acpi_safe_halt();
			local_irq_enable();
		}

		/*
		 * TBD: Can't get time duration while in C1, as resumes
		 *      go to an ISR rather than here.  Need to instrument
		 *      base interrupt handler.
		 *
		 * Note: the TSC better not stop in C1, sched_clock() will
		 *       skew otherwise.
		 */
		sleep_ticks = 0xFFFFFFFF;

		break;

	case ACPI_STATE_C2:
		/* Get start time (ticks) */
		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		/* Tell the scheduler that we are going deep-idle: */
		sched_clock_idle_sleep_event();
		/* Invoke C2 */
		acpi_state_timer_broadcast(pr, cx, 1);
		acpi_cstate_enter(cx);
		/* Get end time (ticks) */
		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

#if defined(CONFIG_GENERIC_TIME) && defined(CONFIG_X86)
		/* TSC halts in C2, so notify users */
		if (tsc_halts_in_c(ACPI_STATE_C2))
			mark_tsc_unstable("possible TSC halt in C2");
#endif
		/* Compute time (ticks) that we were actually asleep */
		sleep_ticks = ticks_elapsed(t1, t2);

		/* Tell the scheduler how much we idled: */
		sched_clock_idle_wakeup_event(sleep_ticks * PM_TIMER_TICK_NS);

		/* Re-enable interrupts */
		local_irq_enable();
		/* Do not account our idle-switching overhead: */
		sleep_ticks -= cx->latency_ticks + C2_OVERHEAD;

		current_thread_info()->status |= TS_POLLING;
		acpi_state_timer_broadcast(pr, cx, 0);
		break;

	case ACPI_STATE_C3:
		acpi_unlazy_tlb(smp_processor_id());
		/*
		 * Must be done before busmaster disable as we might
		 * need to access HPET !
		 */
		acpi_state_timer_broadcast(pr, cx, 1);
		/*
		 * disable bus master
		 * bm_check implies we need ARB_DIS
		 * !bm_check implies we need cache flush
		 * bm_control implies whether we can do ARB_DIS
		 *
		 * That leaves a case where bm_check is set and bm_control is
		 * not set. In that case we cannot do much, we enter C3
		 * without doing anything.
		 */
		if (pr->flags.bm_check && pr->flags.bm_control) {
			if (atomic_inc_return(&c3_cpu_count) ==
			    num_online_cpus()) {
				/*
				 * All CPUs are trying to go to C3
				 * Disable bus master arbitration
				 */
				acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
			}
		} else if (!pr->flags.bm_check) {
			/* SMP with no shared cache... flush cache */
			ACPI_FLUSH_CPU_CACHE();
		}

		/* Get start time (ticks) */
		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		/* Invoke C3 */
		/* Tell the scheduler that we are going deep-idle: */
		sched_clock_idle_sleep_event();
		acpi_cstate_enter(cx);
		/* Get end time (ticks) */
		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		if (pr->flags.bm_check && pr->flags.bm_control) {
			/* Enable bus master arbitration */
			atomic_dec(&c3_cpu_count);
			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
		}

#if defined(CONFIG_GENERIC_TIME) && defined(CONFIG_X86)
		/* TSC halts in C3, so notify users */
		if (tsc_halts_in_c(ACPI_STATE_C3))
			mark_tsc_unstable("TSC halts in C3");
#endif
		/* Compute time (ticks) that we were actually asleep */
		sleep_ticks = ticks_elapsed(t1, t2);
		/* Tell the scheduler how much we idled: */
		sched_clock_idle_wakeup_event(sleep_ticks * PM_TIMER_TICK_NS);

		/* Re-enable interrupts */
		local_irq_enable();
		/* Do not account our idle-switching overhead: */
		sleep_ticks -= cx->latency_ticks + C3_OVERHEAD;

		current_thread_info()->status |= TS_POLLING;
		acpi_state_timer_broadcast(pr, cx, 0);
		break;

	default:
		local_irq_enable();
		return;
	}
	cx->usage++;
	if ((cx->type != ACPI_STATE_C1) && (sleep_ticks > 0))
		cx->time += sleep_ticks;

	next_state = pr->power.state;

#ifdef CONFIG_HOTPLUG_CPU
	/* Don't do promotion/demotion */
	if ((cx->type == ACPI_STATE_C1) && (num_online_cpus() > 1) &&
	    !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) {
		next_state = cx;
		goto end;
	}
#endif

	/*
	 * Promotion?
	 * ----------
	 * Track the number of longs (time asleep is greater than threshold)
	 * and promote when the count threshold is reached.  Note that the
	 * code below is a very aggressive promotion policy (aka. always try
	 * to promote).  Disable promotion entirely if C2/C3 isn't supported
	 * or if max_cstate is exceeded.
	 */
	if (cx->promotion.state &&
	    ((cx->promotion.state - pr->power.states) <= max_cstate)) {
		if (sleep_ticks > cx->promotion.threshold.ticks &&
		    cx->promotion.state->latency <=
		    pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) {
			cx->promotion.count++;
			cx->demotion.count = 0;
			if (cx->promotion.count >=
			    cx->promotion.threshold.count) {
				if (pr->flags.bm_check) {
					if (!(pr->power.bm_activity &
					      cx->promotion.threshold.bm)) {
						next_state = cx->promotion.state;
						goto end;
					}
				} else {
					next_state = cx->promotion.state;
					goto end;
				}
			}
		}
	}

	/*
	 * Demotion?
	 * ---------
	 * Track the number of shorts (time asleep is less than time threshold)
	 * and demote when the count threshold is reached.
	 */
	if (cx->demotion.state) {
		if (sleep_ticks < cx->demotion.threshold.ticks) {
			cx->demotion.count++;
			cx->promotion.count = 0;
			if (cx->demotion.count >= cx->demotion.threshold.count) {
				next_state = cx->demotion.state;
				goto end;
			}
		}
	}

end:
	/*
	 * Demote if current state exceeds max_cstate
	 * or if the latency of the current state is unacceptable
	 */
	if ((pr->power.state - pr->power.states) > max_cstate ||
	    pr->power.state->latency >
	    pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) {
		if (cx->demotion.state)
			next_state = cx->demotion.state;
	}

	/*
	 * New Cx State?
	 * -------------
	 * If we're going to start using a new Cx state we must clean up
	 * from the previous and prepare to use the new.
	 */
	if (next_state != pr->power.state)
		acpi_processor_power_activate(pr, next_state);
}
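
/*
 * This function sets the default Cx state policy (e.g. promotion and
 * demotion): the initial (lowest valid) state, plus the promotion and
 * demotion thresholds linking each valid state to its neighbours.
 */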
static int acpi_processor_set_power_policy(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int state_is_set = 0;
	struct acpi_processor_cx *lower = NULL;
	struct acpi_processor_cx *higher = NULL;
	struct acpi_processor_cx *cx;

	if (!pr)
		return -EINVAL;

	/* set the first valid C-state as the current (initial) state */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (!state_is_set)
			pr->power.state = cx;
		state_is_set++;
		break;
	}

	if (!state_is_set)
		return -ENODEV;

	/* demotion: each valid state points at the next shallower one */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (lower) {
			cx->demotion.state = lower;
			cx->demotion.threshold.ticks = cx->latency_ticks;
			cx->demotion.threshold.count = 1;
			if (cx->type == ACPI_STATE_C3)
				cx->demotion.threshold.bm = bm_history;
		}

		lower = cx;
	}

	/* promotion: each valid state points at the next deeper one */
	for (i = (ACPI_PROCESSOR_MAX_POWER - 1); i > 0; i--) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (higher) {
			cx->promotion.state = higher;
			cx->promotion.threshold.ticks = cx->latency_ticks;
			if (cx->type >= ACPI_STATE_C2)
				cx->promotion.threshold.count = 4;
			else
				cx->promotion.threshold.count = 10;
			if (higher->type == ACPI_STATE_C3)
				cx->promotion.threshold.bm = bm_history;
		}

		higher = cx;
	}

	return 0;
}
#endif /* !CONFIG_CPU_IDLE */
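
/*
 * When _CST is absent, C2 and C3 may still be described by the FADT:
 * P_BLK I/O addresses P_LVL2/P_LVL3 plus the C2latency/C3latency fields.
 */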
static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
{

	if (!pr)
		return -EINVAL;

	if (!pr->pblk)
		return -ENODEV;

	/* if info is obtained from pblk/fadt, type equals state */
	pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
	pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

#ifndef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system.
	 */
	if ((num_online_cpus() > 1) &&
	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		return -ENODEV;
#endif

	/* determine C2 and C3 address from pblk */
	pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
	pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;

	/* determine latencies from FADT */
	pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.C2latency;
	pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.C3latency;

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "lvl2[0x%08x] lvl3[0x%08x]\n",
			  pr->power.states[ACPI_STATE_C2].address,
			  pr->power.states[ACPI_STATE_C3].address));

	return 0;
}

static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
{
	if (!pr->power.states[ACPI_STATE_C1].valid) {
		/* set the first C-State to C1 */
		/* all processors need to support C1 */
		pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
		pr->power.states[ACPI_STATE_C1].valid = 1;
		pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT;
	}
	/* the C0 state only exists as a filler in our array */
	pr->power.states[ACPI_STATE_C0].valid = 1;
	return 0;
}
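
/*
 * _CST returns a package of the form
 *   { count, { register, type, latency[us], power[mW] }, ... }
 * where each inner 4-element package describes one C-state.  The parser
 * below validates every element before copying the state into
 * pr->power.states[].
 */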
static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
	acpi_status status = 0;
	acpi_integer count;
	int current_count;
	int i;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *cst;

	if (nocst)
		return -ENODEV;

	current_count = 0;

	status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
		return -ENODEV;
	}

	cst = buffer.pointer;

	/* There must be at least 2 elements */
	if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
		printk(KERN_ERR PREFIX "not enough elements in _CST\n");
		status = -EFAULT;
		goto end;
	}

	count = cst->package.elements[0].integer.value;

	/* Validate number of power states. */
	if (count < 1 || count != cst->package.count - 1) {
		printk(KERN_ERR PREFIX "count given by _CST is not valid\n");
		status = -EFAULT;
		goto end;
	}

	/* Tell driver that at least _CST is supported. */
	pr->flags.has_cst = 1;

	for (i = 1; i <= count; i++) {
		union acpi_object *element;
		union acpi_object *obj;
		struct acpi_power_register *reg;
		struct acpi_processor_cx cx;

		memset(&cx, 0, sizeof(cx));

		element = &(cst->package.elements[i]);
		if (element->type != ACPI_TYPE_PACKAGE)
			continue;

		if (element->package.count != 4)
			continue;

		/* element[0]: register buffer describing the entry method */
		obj = &(element->package.elements[0]);

		if (obj->type != ACPI_TYPE_BUFFER)
			continue;

		reg = (struct acpi_power_register *)obj->buffer.pointer;

		if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
		    (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
			continue;

		/* element[1]: type of the C-state */
		obj = &(element->package.elements[1]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.type = obj->integer.value;
		/*
		 * Some buggy BIOSes won't list C1 in _CST -
		 * Let acpi_processor_get_power_info_default() handle them later
		 */
		if (i == 1 && cx.type != ACPI_STATE_C1)
			current_count++;

		cx.address = reg->address;
		cx.index = current_count + 1;

		cx.entry_method = ACPI_CSTATE_SYSTEMIO;
		if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
			if (acpi_processor_ffh_cstate_probe
					(pr->id, &cx, reg) == 0) {
				cx.entry_method = ACPI_CSTATE_FFH;
			} else if (cx.type == ACPI_STATE_C1) {
				/*
				 * C1 is a special case where FIXED_HARDWARE
				 * can be handled in non-MWAIT way as well.
				 * In that case, save this _CST entry info.
				 * Otherwise, ignore this info and continue.
				 */
				cx.entry_method = ACPI_CSTATE_HALT;
				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
			} else {
				continue;
			}
			if (cx.type == ACPI_STATE_C1 &&
			    (idle_halt || idle_nomwait)) {
				/*
				 * In most cases the C1 space_id obtained from
				 * _CST object is FIXED_HARDWARE access mode.
				 * But when the option of idle=halt is added,
				 * the entry_method type should be changed from
				 * CSTATE_FFH to CSTATE_HALT.
				 * When the option of idle=nomwait is added,
				 * the C1 entry_method type should be
				 * CSTATE_HALT.
				 */
				cx.entry_method = ACPI_CSTATE_HALT;
				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
			}
		} else {
			snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
				 cx.address);
		}

		if (cx.type == ACPI_STATE_C1) {
			cx.valid = 1;
		}

		/* element[2]: worst-case latency in microseconds */
		obj = &(element->package.elements[2]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.latency = obj->integer.value;

		/* element[3]: average power consumption in milliwatts */
		obj = &(element->package.elements[3]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.power = obj->integer.value;

		current_count++;
		memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));

		/*
		 * We support total ACPI_PROCESSOR_MAX_POWER - 1
		 * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1)
		 */
		if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
			printk(KERN_WARNING
			       "Limiting number of power states to max (%d)\n",
			       ACPI_PROCESSOR_MAX_POWER);
			printk(KERN_WARNING
			       "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
			break;
		}
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
			  current_count));

	/* Validate number of power states discovered */
	if (current_count < 2)
		status = -EFAULT;

end:
	kfree(buffer.pointer);

	return status;
}

static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
{
	/* C2 must have an I/O address (from P_BLK or _CST) */
	if (!cx->address)
		return;

	/*
	 * C2 latency must be less than or equal to 100
	 * microseconds.
	 */
	else if (cx->latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "latency too large [%d]\n", cx->latency));
		return;
	}

	/*
	 * Otherwise we've met all of our C2 requirements.
	 * Normalize the C2 latency to expedite policy
	 */
	cx->valid = 1;

#ifndef CONFIG_CPU_IDLE
	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
#else
	cx->latency_ticks = cx->latency;
#endif

	return;
}

static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
					   struct acpi_processor_cx *cx)
{
	static int bm_check_flag;

	/* C3 must have an I/O address (from P_BLK or _CST) */
	if (!cx->address)
		return;

	/*
	 * C3 latency must be less than or equal to 1000
	 * microseconds.
	 */
	else if (cx->latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "latency too large [%d]\n", cx->latency));
		return;
	}

	/*
	 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
	 * DMA transfers are used by any ISA device to avoid livelock.
	 * Note that we could disable Type-F DMA (as recommended by
	 * the erratum), but this is known to disrupt certain ISA
	 * devices thus we take the conservative approach.
	 */
	else if (errata.piix4.fdma) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "C3 not supported on PIIX4 with Type-F DMA\n"));
		return;
	}

	/* All the logic here assumes flags.bm_check is same across all CPUs */
	if (!bm_check_flag) {
		/* Determine whether bm_check is needed based on CPU  */
		acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
		bm_check_flag = pr->flags.bm_check;
	} else {
		pr->flags.bm_check = bm_check_flag;
	}

	if (pr->flags.bm_check) {
		if (!pr->flags.bm_control) {
			if (pr->flags.has_cst != 1) {
				/* bus mastering control is necessary */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support requires BM control\n"));
				return;
			} else {
				/* Here we enter C3 without bus mastering */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support without BM control\n"));
			}
		}
	} else {
		/*
		 * WBINVD should be set in fadt, for C3 state to be
		 * supported on when bm_check is not required.
		 */
		if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "Cache invalidation should work properly"
					  " for C3 to be enabled on SMP systems\n"));
			return;
		}
		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
	}

	/*
	 * Otherwise we've met all of our C3 requirements.
	 * Normalize the C3 latency to expedite policy.  Enable
	 * checking of bus mastering status (bm_check) so we can
	 * use this in our C3 policy
	 */
	cx->valid = 1;

#ifndef CONFIG_CPU_IDLE
	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
#else
	cx->latency_ticks = cx->latency;
#endif

	return;
}

static int acpi_processor_power_verify(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int working = 0;

	pr->power.timer_broadcast_on_state = INT_MAX;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		struct acpi_processor_cx *cx = &pr->power.states[i];

		switch (cx->type) {
		case ACPI_STATE_C1:
			cx->valid = 1;
			break;

		case ACPI_STATE_C2:
			acpi_processor_power_verify_c2(cx);
			if (cx->valid)
				acpi_timer_check_state(i, pr, cx);
			break;

		case ACPI_STATE_C3:
			acpi_processor_power_verify_c3(pr, cx);
			if (cx->valid)
				acpi_timer_check_state(i, pr, cx);
			break;
		}

		if (cx->valid)
			working++;
	}

	acpi_propagate_timer_broadcast(pr);

	return (working);
}

static int acpi_processor_get_power_info(struct acpi_processor *pr)
{
	unsigned int i;
	int result;

	/* NOTE: the idle thread may not be running while calling
	 * this function */

	/* Zero initialize all the C-states info. */
	memset(pr->power.states, 0, sizeof(pr->power.states));

	result = acpi_processor_get_power_info_cst(pr);
	if (result == -ENODEV)
		result = acpi_processor_get_power_info_fadt(pr);

	if (result)
		return result;

	acpi_processor_get_power_info_default(pr);

	pr->power.count = acpi_processor_power_verify(pr);

#ifndef CONFIG_CPU_IDLE
	/*
	 * Set Default Policy
	 * ------------------
	 * Now that we know which states are supported, set the default
	 * policy.  Note that this policy can be changed dynamically
	 * (e.g. encourage deeper sleeps to conserve battery life when
	 * not on AC).
	 */
	result = acpi_processor_set_power_policy(pr);
	if (result)
		return result;
#endif

	/*
	 * if one state of type C2 or C3 is available, mark this
	 * CPU as being "idle manageable"
	 */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		if (pr->power.states[i].valid) {
			pr->power.count = i;
			if (pr->power.states[i].type >= ACPI_STATE_C2)
				pr->flags.power = 1;
		}
	}

	return 0;
}
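
/*
 * /proc/acpi/processor/<id>/power reports the discovered C-states and,
 * on the legacy idle path, the promotion/demotion bookkeeping for each.
 */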
static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
{
	struct acpi_processor *pr = seq->private;
	unsigned int i;

	if (!pr)
		goto end;

	seq_printf(seq, "active state:            C%zd\n"
		   "max_cstate:              C%d\n"
		   "bus master activity:     %08x\n"
		   "maximum allowed latency: %d usec\n",
		   pr->power.state ? pr->power.state - pr->power.states : 0,
		   max_cstate, (unsigned)pr->power.bm_activity,
		   pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY));

	seq_puts(seq, "states:\n");

	for (i = 1; i <= pr->power.count; i++) {
		seq_printf(seq, "   %cC%d:                  ",
			   (&pr->power.states[i] ==
			    pr->power.state ? '*' : ' '), i);

		if (!pr->power.states[i].valid) {
			seq_puts(seq, "<not supported>\n");
			continue;
		}

		switch (pr->power.states[i].type) {
		case ACPI_STATE_C1:
			seq_printf(seq, "type[C1] ");
			break;
		case ACPI_STATE_C2:
			seq_printf(seq, "type[C2] ");
			break;
		case ACPI_STATE_C3:
			seq_printf(seq, "type[C3] ");
			break;
		default:
			seq_printf(seq, "type[--] ");
			break;
		}

		if (pr->power.states[i].promotion.state)
			seq_printf(seq, "promotion[C%zd] ",
				   (pr->power.states[i].promotion.state -
				    pr->power.states));
		else
			seq_puts(seq, "promotion[--] ");

		if (pr->power.states[i].demotion.state)
			seq_printf(seq, "demotion[C%zd] ",
				   (pr->power.states[i].demotion.state -
				    pr->power.states));
		else
			seq_puts(seq, "demotion[--] ");

		seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n",
			   pr->power.states[i].latency,
			   pr->power.states[i].usage,
			   (unsigned long long)pr->power.states[i].time);
	}

end:
	return 0;
}

static int acpi_processor_power_open_fs(struct inode *inode, struct file *file)
{
	return single_open(file, acpi_processor_power_seq_show,
			   PDE(inode)->data);
}

static const struct file_operations acpi_processor_power_fops = {
	.owner = THIS_MODULE,
	.open = acpi_processor_power_open_fs,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

#ifndef CONFIG_CPU_IDLE

int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
	int result = 0;

	if (boot_option_idle_override)
		return 0;

	if (!pr)
		return -EINVAL;

	if (nocst) {
		return -ENODEV;
	}

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	/*
	 * Fall back to the default idle loop, when pm_idle_save had
	 * been initialized.
	 */
	if (pm_idle_save) {
		pm_idle = pm_idle_save;
		/* Relies on interrupts forcing exit from idle. */
		synchronize_sched();
	}

	pr->flags.power = 0;
	result = acpi_processor_get_power_info(pr);
	if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
		pm_idle = acpi_processor_idle;

	return result;
}

#ifdef CONFIG_SMP
static void smp_callback(void *v)
{
	/* we already woke the CPU up, nothing more to do */
}

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement.  This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int acpi_processor_latency_notify(struct notifier_block *b,
					 unsigned long l, void *v)
{
	smp_call_function(smp_callback, NULL, 1);
	return NOTIFY_OK;
}

static struct notifier_block acpi_processor_latency_notifier = {
	.notifier_call = acpi_processor_latency_notify,
};

#endif

#else /* CONFIG_CPU_IDLE */

/**
 * acpi_idle_bm_check - checks if bus master activity was detected
 */
static int acpi_idle_bm_check(void)
{
	u32 bm_status = 0;

	acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
	if (bm_status)
		acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
	/*
	 * PIIX4 Erratum #18: Note that BMIDEA bit of BMISX register
	 * doesn't show the actual bus mastering activity, forcing
	 * us to manually check the BMIDEA bit of each IDE channel.
	 */
	else if (errata.piix4.bmisx) {
		if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
		    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
			bm_status = 1;
	}
	return bm_status;
}

/**
 * acpi_idle_update_bm_rld - updates the BM_RLD bit depending on target state
 * @pr: the processor
 * @target: the new target state
 */
static inline void acpi_idle_update_bm_rld(struct acpi_processor *pr,
					   struct acpi_processor_cx *target)
{
	if (pr->flags.bm_rld_set && target->type != ACPI_STATE_C3) {
		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
		pr->flags.bm_rld_set = 0;
	}

	if (!pr->flags.bm_rld_set && target->type == ACPI_STATE_C3) {
		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
		pr->flags.bm_rld_set = 1;
	}
}

/**
 * acpi_idle_do_entry - a helper function that does C2 and C3 type entry
 * @cx: cstate data
 *
 * Caller disables interrupt before call and enables interrupt after return.
 */
static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
	/* Don't trace irqs off for idle */
	stop_critical_timings();
	if (cx->entry_method == ACPI_CSTATE_FFH) {
		/* Call into architectural FFH based C-state */
		acpi_processor_ffh_cstate_enter(cx);
	} else if (cx->entry_method == ACPI_CSTATE_HALT) {
		acpi_safe_halt();
	} else {
		int unused;
		/* IO port protocol */
		inb(cx->address);
		/* Dummy wait op - must do something useless after P_LVL2 read
		   because chipsets cannot guarantee that STPCLK# signal
		   gets asserted in time to freeze execution properly. */
		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
	}
	start_critical_timings();
}

/**
 * acpi_idle_enter_c1 - enters an ACPI C1 state-type
 * @dev: the target CPU
 * @state: the state data
 *
 * This is equivalent to the HALT instruction.
 */
static int acpi_idle_enter_c1(struct cpuidle_device *dev,
			      struct cpuidle_state *state)
{
	u32 t1, t2;
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);

	pr = __get_cpu_var(processors);

	if (unlikely(!pr))
		return 0;

	local_irq_disable();

	/* Do not access any ACPI IO ports in suspend path */
	if (acpi_idle_suspend) {
		acpi_safe_halt();
		local_irq_enable();
		return 0;
	}

	if (pr->flags.bm_check)
		acpi_idle_update_bm_rld(pr, cx);

	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
	acpi_idle_do_entry(cx);
	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

	local_irq_enable();
	cx->usage++;

	return ticks_elapsed_in_us(t1, t2);
}

/**
 * acpi_idle_enter_simple - enters an ACPI state without BM handling
 * @dev: the target CPU
 * @state: the state data
 */
static int acpi_idle_enter_simple(struct cpuidle_device *dev,
				  struct cpuidle_state *state)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
	u32 t1, t2;
	int sleep_ticks = 0;

	pr = __get_cpu_var(processors);

	if (unlikely(!pr))
		return 0;

	if (acpi_idle_suspend)
		return acpi_idle_enter_c1(dev, state);

	local_irq_disable();
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we test
	 * NEED_RESCHED:
	 */
	smp_mb();

	if (unlikely(need_resched())) {
		current_thread_info()->status |= TS_POLLING;
		local_irq_enable();
		return 0;
	}

	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	acpi_state_timer_broadcast(pr, cx, 1);

	if (pr->flags.bm_check)
		acpi_idle_update_bm_rld(pr, cx);

	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	acpi_idle_do_entry(cx);
	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

#if defined(CONFIG_GENERIC_TIME) && defined(CONFIG_X86)
	/* TSC could halt in idle, so notify users */
	if (tsc_halts_in_c(cx->type))
		mark_tsc_unstable("TSC halts in idle");
#endif
	sleep_ticks = ticks_elapsed(t1, t2);

	/* Tell the scheduler how much we idled: */
	sched_clock_idle_wakeup_event(sleep_ticks * PM_TIMER_TICK_NS);

	local_irq_enable();
	current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	acpi_state_timer_broadcast(pr, cx, 0);
	cx->time += sleep_ticks;
	return ticks_elapsed_in_us(t1, t2);
}

static int c3_cpu_count;
static DEFINE_SPINLOCK(c3_lock);
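
/*
 * With bus-master control available, the last CPU to enter C3 disables
 * bus master arbitration (ARB_DIS) and the first to leave re-enables it;
 * c3_lock serializes c3_cpu_count and the ARB_DIS register accesses.
 */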

/**
 * acpi_idle_enter_bm - enters C3 with proper BM handling
 * @dev: the target CPU
 * @state: the state data
 *
 * If BM is detected, the deepest non-C3 idle state is entered instead.
 */
static int acpi_idle_enter_bm(struct cpuidle_device *dev,
			      struct cpuidle_state *state)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
	u32 t1, t2;
	int sleep_ticks = 0;

	pr = __get_cpu_var(processors);

	if (unlikely(!pr))
		return 0;

	if (acpi_idle_suspend)
		return acpi_idle_enter_c1(dev, state);

	if (acpi_idle_bm_check()) {
		if (dev->safe_state) {
			dev->last_state = dev->safe_state;
			return dev->safe_state->enter(dev, dev->safe_state);
		} else {
			local_irq_disable();
			acpi_safe_halt();
			local_irq_enable();
			return 0;
		}
	}

	local_irq_disable();
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we test
	 * NEED_RESCHED:
	 */
	smp_mb();

	if (unlikely(need_resched())) {
		current_thread_info()->status |= TS_POLLING;
		local_irq_enable();
		return 0;
	}

	acpi_unlazy_tlb(smp_processor_id());

	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	acpi_state_timer_broadcast(pr, cx, 1);

	acpi_idle_update_bm_rld(pr, cx);

	/*
	 * disable bus master
	 * bm_check implies we need ARB_DIS
	 * !bm_check implies we need cache flush
	 * bm_control implies whether we can do ARB_DIS
	 *
	 * That leaves a case where bm_check is set and bm_control is
	 * not set. In that case we cannot do much, we enter C3
	 * without doing anything.
	 */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		spin_lock(&c3_lock);
		c3_cpu_count++;
		/* Disable bus master arbitration when all CPUs are in C3 */
		if (c3_cpu_count == num_online_cpus())
			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
		spin_unlock(&c3_lock);
	} else if (!pr->flags.bm_check) {
		ACPI_FLUSH_CPU_CACHE();
	}

	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
	acpi_idle_do_entry(cx);
	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

	/* Re-enable bus master arbitration */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		spin_lock(&c3_lock);
		acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
		c3_cpu_count--;
		spin_unlock(&c3_lock);
	}

#if defined(CONFIG_GENERIC_TIME) && defined(CONFIG_X86)
	/* TSC could halt in idle, so notify users */
	if (tsc_halts_in_c(ACPI_STATE_C3))
		mark_tsc_unstable("TSC halts in idle");
#endif
	sleep_ticks = ticks_elapsed(t1, t2);
	/* Tell the scheduler how much we idled: */
	sched_clock_idle_wakeup_event(sleep_ticks * PM_TIMER_TICK_NS);

	local_irq_enable();
	current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	acpi_state_timer_broadcast(pr, cx, 0);
	cx->time += sleep_ticks;
	return ticks_elapsed_in_us(t1, t2);
}

struct cpuidle_driver acpi_idle_driver = {
	.name =		"acpi_idle",
	.owner =	THIS_MODULE,
};

/**
 * acpi_processor_setup_cpuidle - prepares and configures CPUIDLE
 * @pr: the ACPI processor
 */
static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
{
	int i, count = CPUIDLE_DRIVER_STATE_START;
	struct acpi_processor_cx *cx;
	struct cpuidle_state *state;
	struct cpuidle_device *dev = &pr->power.dev;

	if (!pr->flags.power_setup_done)
		return -EINVAL;

	if (pr->flags.power == 0) {
		return -EINVAL;
	}

	dev->cpu = pr->id;
	for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
		dev->states[i].name[0] = '\0';
		dev->states[i].desc[0] = '\0';
	}

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		cx = &pr->power.states[i];
		state = &dev->states[count];

		if (!cx->valid)
			continue;

#ifdef CONFIG_HOTPLUG_CPU
		if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
		    !pr->flags.has_cst &&
		    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
			continue;
#endif
		cpuidle_set_statedata(state, cx);

		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
		strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
		state->exit_latency = cx->latency;
		state->target_residency = cx->latency * latency_factor;
		state->power_usage = cx->power;

		state->flags = 0;
		switch (cx->type) {
		case ACPI_STATE_C1:
			state->flags |= CPUIDLE_FLAG_SHALLOW;
			if (cx->entry_method == ACPI_CSTATE_FFH)
				state->flags |= CPUIDLE_FLAG_TIME_VALID;

			state->enter = acpi_idle_enter_c1;
			dev->safe_state = state;
			break;

		case ACPI_STATE_C2:
			state->flags |= CPUIDLE_FLAG_BALANCED;
			state->flags |= CPUIDLE_FLAG_TIME_VALID;
			state->enter = acpi_idle_enter_simple;
			dev->safe_state = state;
			break;

		case ACPI_STATE_C3:
			state->flags |= CPUIDLE_FLAG_DEEP;
			state->flags |= CPUIDLE_FLAG_TIME_VALID;
			state->flags |= CPUIDLE_FLAG_CHECK_BM;
			state->enter = pr->flags.bm_check ?
					acpi_idle_enter_bm :
					acpi_idle_enter_simple;
			break;
		}

		count++;
		if (count == CPUIDLE_STATE_MAX)
			break;
	}

	dev->state_count = count;

	if (!count)
		return -EINVAL;

	return 0;
}

int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
	int ret = 0;

	if (boot_option_idle_override)
		return 0;

	if (!pr)
		return -EINVAL;

	if (nocst) {
		return -ENODEV;
	}

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	cpuidle_pause_and_lock();
	cpuidle_disable_device(&pr->power.dev);
	acpi_processor_get_power_info(pr);
	if (pr->flags.power) {
		acpi_processor_setup_cpuidle(pr);
		ret = cpuidle_enable_device(&pr->power.dev);
	}
	cpuidle_resume_and_unlock();

	return ret;
}

#endif /* CONFIG_CPU_IDLE */

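/*
 * acpi_processor_power_init - per-CPU C-state setup: applies DMI blacklist
 * limits, tells the BIOS via the FADT SMI command that the OS supports
 * _CST, discovers the C-states, and hooks either cpuidle or the legacy
 * pm_idle path.  Also creates the /proc power interface.
 */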
int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
					struct acpi_device *device)
{
	acpi_status status = 0;
	static int first_run;
	struct proc_dir_entry *entry = NULL;
	unsigned int i;

	if (boot_option_idle_override)
		return 0;

	if (!first_run) {
		if (idle_halt) {
			/*
			 * When the boot option of "idle=halt" is added,
			 * C1 idle is used but C2/C3 idle states are not
			 * used, so max_cstate is limited to one.
			 */
			max_cstate = 1;
		}
		dmi_check_system(processor_power_dmi_table);
		max_cstate = acpi_processor_cstate_check(max_cstate);
		if (max_cstate < ACPI_C_STATES_MAX)
			printk(KERN_NOTICE
			       "ACPI: processor limited to max C-state %d\n",
			       max_cstate);
		first_run++;
#if !defined(CONFIG_CPU_IDLE) && defined(CONFIG_SMP)
		pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY,
				    &acpi_processor_latency_notifier);
#endif
	}

	if (!pr)
		return -EINVAL;

	if (acpi_gbl_FADT.cst_control && !nocst) {
		status =
		    acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Notifying BIOS of _CST ability failed"));
		}
	}

	acpi_processor_get_power_info(pr);
	pr->flags.power_setup_done = 1;

	/*
	 * Install the idle handler if processor power management is supported.
	 * Note that the previously set idle handler will be used on
	 * platforms that only support C1.
	 */
	if (pr->flags.power) {
#ifdef CONFIG_CPU_IDLE
		acpi_processor_setup_cpuidle(pr);
		if (cpuidle_register_device(&pr->power.dev))
			return -EIO;
#endif

		printk(KERN_INFO PREFIX "CPU%d (power states:", pr->id);
		for (i = 1; i <= pr->power.count; i++)
			if (pr->power.states[i].valid)
				printk(" C%d[C%d]", i,
				       pr->power.states[i].type);
		printk(")\n");

#ifndef CONFIG_CPU_IDLE
		if (pr->id == 0) {
			pm_idle_save = pm_idle;
			pm_idle = acpi_processor_idle;
		}
#endif
	}

	/* 'power' [R] */
	entry = proc_create_data(ACPI_PROCESSOR_FILE_POWER,
				 S_IRUGO, acpi_device_dir(device),
				 &acpi_processor_power_fops,
				 acpi_driver_data(device));
	if (!entry)
		return -EIO;
	return 0;
}

int acpi_processor_power_exit(struct acpi_processor *pr,
			      struct acpi_device *device)
{
	if (boot_option_idle_override)
		return 0;

#ifdef CONFIG_CPU_IDLE
	cpuidle_unregister_device(&pr->power.dev);
#endif
	pr->flags.power_setup_done = 0;

	if (acpi_device_dir(device))
		remove_proc_entry(ACPI_PROCESSOR_FILE_POWER,
				  acpi_device_dir(device));

#ifndef CONFIG_CPU_IDLE
	/* Unregister the idle handler when processor #0 is removed. */
	if (pr->id == 0) {
		if (pm_idle_save)
			pm_idle = pm_idle_save;

		/*
		 * We are about to unload the current idle thread pm callback
		 * (pm_idle), Wait for all processors to update cached/local
		 * copies of pm_idle before proceeding.
		 */
		cpu_idle_wait();
#ifdef CONFIG_SMP
		pm_qos_remove_notifier(PM_QOS_CPU_DMA_LATENCY,
				       &acpi_processor_latency_notifier);
#endif
	}
#endif

	return 0;
}