Showing error 1238

User: Jiri Slaby
Error type: Leaving function in locked state
Error type description: A lock acquired inside a function is not released on every path out of that function, so it is still held (leaked) when the function returns; see the illustrative sketch below
File location: drivers/acpi/processor_idle.c
Line in file: 224
Project: Linux Kernel
Project version: 2.6.28
Tools: Stanse (1.2)
Entered: 2012-05-21 20:30:05 UTC
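
The reported line (224 in the listing) is the closing brace of acpi_safe_halt(), whose non-reschedule path ends with local_irq_disable(); the checker evidently models interrupt disable/enable (like spin_lock/spin_unlock) as an acquire/release pair and reports any function that can return with the "lock" still held. A minimal, purely illustrative sketch of the pattern this error class describes (hypothetical code, not taken from the file below):

    #include <linux/errno.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(example_lock);          /* hypothetical lock, for illustration only */

    static int example_leaks_lock(int flag)
    {
            spin_lock(&example_lock);

            if (flag)
                    return -EINVAL;                /* error path returns with example_lock held: reported */

            spin_unlock(&example_lock);            /* only this path releases the lock */
            return 0;
    }

For context, the comment above acpi_safe_halt() (listing lines 207-210) documents that callers disable interrupts before the call and re-enable them after return.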


Source:

   1/*
   2 * processor_idle - idle state submodule to the ACPI processor driver
   3 *
   4 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
   5 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
   6 *  Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
   7 *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
   8 *                          - Added processor hotplug support
   9 *  Copyright (C) 2005  Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
  10 *                          - Added support for C3 on SMP
  11 *
  12 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  13 *
  14 *  This program is free software; you can redistribute it and/or modify
  15 *  it under the terms of the GNU General Public License as published by
  16 *  the Free Software Foundation; either version 2 of the License, or (at
  17 *  your option) any later version.
  18 *
  19 *  This program is distributed in the hope that it will be useful, but
  20 *  WITHOUT ANY WARRANTY; without even the implied warranty of
  21 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  22 *  General Public License for more details.
  23 *
  24 *  You should have received a copy of the GNU General Public License along
  25 *  with this program; if not, write to the Free Software Foundation, Inc.,
  26 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
  27 *
  28 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  29 */
  30
  31#include <linux/kernel.h>
  32#include <linux/module.h>
  33#include <linux/init.h>
  34#include <linux/cpufreq.h>
  35#include <linux/proc_fs.h>
  36#include <linux/seq_file.h>
  37#include <linux/acpi.h>
  38#include <linux/dmi.h>
  39#include <linux/moduleparam.h>
  40#include <linux/sched.h>        /* need_resched() */
  41#include <linux/pm_qos_params.h>
  42#include <linux/clockchips.h>
  43#include <linux/cpuidle.h>
  44
  45/*
  46 * Include the apic definitions for x86 to have the APIC timer related defines
  47 * available also for UP (on SMP it gets magically included via linux/smp.h).
  48 * asm/acpi.h is not an option, as it would require more include magic. Also
  49 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
  50 */
  51#ifdef CONFIG_X86
  52#include <asm/apic.h>
  53#endif
  54
  55#include <asm/io.h>
  56#include <asm/uaccess.h>
  57
  58#include <acpi/acpi_bus.h>
  59#include <acpi/processor.h>
  60#include <asm/processor.h>
  61
  62#define ACPI_PROCESSOR_CLASS            "processor"
  63#define _COMPONENT              ACPI_PROCESSOR_COMPONENT
  64ACPI_MODULE_NAME("processor_idle");
  65#define ACPI_PROCESSOR_FILE_POWER        "power"
  66#define US_TO_PM_TIMER_TICKS(t)                ((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
  67#define PM_TIMER_TICK_NS                (1000000000ULL/PM_TIMER_FREQUENCY)
  68#ifndef CONFIG_CPU_IDLE
  69#define C2_OVERHEAD                        4        /* 1us (3.579 ticks per us) */
  70#define C3_OVERHEAD                        4        /* 1us (3.579 ticks per us) */
  71static void (*pm_idle_save) (void) __read_mostly;
  72#else
  73#define C2_OVERHEAD                        1        /* 1us */
  74#define C3_OVERHEAD                        1        /* 1us */
  75#endif
  76#define PM_TIMER_TICKS_TO_US(p)                (((p) * 1000)/(PM_TIMER_FREQUENCY/1000))
  77
  78static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
  79#ifdef CONFIG_CPU_IDLE
  80module_param(max_cstate, uint, 0000);
  81#else
  82module_param(max_cstate, uint, 0644);
  83#endif
  84static unsigned int nocst __read_mostly;
  85module_param(nocst, uint, 0000);
  86
  87#ifndef CONFIG_CPU_IDLE
  88/*
  89 * bm_history -- bit-mask with a bit per jiffy of bus-master activity
  90 * 1000 HZ: 0xFFFFFFFF: 32 jiffies = 32ms
  91 * 800 HZ: 0xFFFFFFFF: 32 jiffies = 40ms
  92 * 100 HZ: 0x0000000F: 4 jiffies = 40ms
  93 * reduce history for more aggressive entry into C3
  94 */
  95static unsigned int bm_history __read_mostly =
  96    (HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1));
  97module_param(bm_history, uint, 0644);
  98
  99static int acpi_processor_set_power_policy(struct acpi_processor *pr);
 100
 101#else        /* CONFIG_CPU_IDLE */
 102static unsigned int latency_factor __read_mostly = 2;
 103module_param(latency_factor, uint, 0644);
 104#endif
 105
 106/*
 107 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
 108 * For now disable this. Probably a bug somewhere else.
 109 *
 110 * To skip this limit, boot/load with a large max_cstate limit.
 111 */
 112static int set_max_cstate(const struct dmi_system_id *id)
 113{
 114        if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
 115                return 0;
 116
 117        printk(KERN_NOTICE PREFIX "%s detected - limiting to C%ld max_cstate."
 118               " Override with \"processor.max_cstate=%d\"\n", id->ident,
 119               (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);
 120
 121        max_cstate = (long)id->driver_data;
 122
 123        return 0;
 124}
 125
 126/* Actually this shouldn't be __cpuinitdata, would be better to fix the
 127   callers to only run once -AK */
 128static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
 129        { set_max_cstate, "IBM ThinkPad R40e", {
 130          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
 131          DMI_MATCH(DMI_BIOS_VERSION,"1SET70WW")}, (void *)1},
 132        { set_max_cstate, "IBM ThinkPad R40e", {
 133          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
 134          DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW")}, (void *)1},
 135        { set_max_cstate, "IBM ThinkPad R40e", {
 136          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
 137          DMI_MATCH(DMI_BIOS_VERSION,"1SET43WW") }, (void*)1},
 138        { set_max_cstate, "IBM ThinkPad R40e", {
 139          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
 140          DMI_MATCH(DMI_BIOS_VERSION,"1SET45WW") }, (void*)1},
 141        { set_max_cstate, "IBM ThinkPad R40e", {
 142          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
 143          DMI_MATCH(DMI_BIOS_VERSION,"1SET47WW") }, (void*)1},
 144        { set_max_cstate, "IBM ThinkPad R40e", {
 145          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
 146          DMI_MATCH(DMI_BIOS_VERSION,"1SET50WW") }, (void*)1},
 147        { set_max_cstate, "IBM ThinkPad R40e", {
 148          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
 149          DMI_MATCH(DMI_BIOS_VERSION,"1SET52WW") }, (void*)1},
 150        { set_max_cstate, "IBM ThinkPad R40e", {
 151          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
 152          DMI_MATCH(DMI_BIOS_VERSION,"1SET55WW") }, (void*)1},
 153        { set_max_cstate, "IBM ThinkPad R40e", {
 154          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
 155          DMI_MATCH(DMI_BIOS_VERSION,"1SET56WW") }, (void*)1},
 156        { set_max_cstate, "IBM ThinkPad R40e", {
 157          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
 158          DMI_MATCH(DMI_BIOS_VERSION,"1SET59WW") }, (void*)1},
 159        { set_max_cstate, "IBM ThinkPad R40e", {
 160          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
 161          DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW") }, (void*)1},
 162        { set_max_cstate, "IBM ThinkPad R40e", {
 163          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
 164          DMI_MATCH(DMI_BIOS_VERSION,"1SET61WW") }, (void*)1},
 165        { set_max_cstate, "IBM ThinkPad R40e", {
 166          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
 167          DMI_MATCH(DMI_BIOS_VERSION,"1SET62WW") }, (void*)1},
 168        { set_max_cstate, "IBM ThinkPad R40e", {
 169          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
 170          DMI_MATCH(DMI_BIOS_VERSION,"1SET64WW") }, (void*)1},
 171        { set_max_cstate, "IBM ThinkPad R40e", {
 172          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
 173          DMI_MATCH(DMI_BIOS_VERSION,"1SET65WW") }, (void*)1},
 174        { set_max_cstate, "IBM ThinkPad R40e", {
 175          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
 176          DMI_MATCH(DMI_BIOS_VERSION,"1SET68WW") }, (void*)1},
 177        { set_max_cstate, "Medion 41700", {
 178          DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
 179          DMI_MATCH(DMI_BIOS_VERSION,"R01-A1J")}, (void *)1},
 180        { set_max_cstate, "Clevo 5600D", {
 181          DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
 182          DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
 183         (void *)2},
 184        {},
 185};
 186
 187static inline u32 ticks_elapsed(u32 t1, u32 t2)
 188{
 189        if (t2 >= t1)
 190                return (t2 - t1);
 191        else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
 192                return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
 193        else
 194                return ((0xFFFFFFFF - t1) + t2);
 195}
 196
 197static inline u32 ticks_elapsed_in_us(u32 t1, u32 t2)
 198{
 199        if (t2 >= t1)
 200                return PM_TIMER_TICKS_TO_US(t2 - t1);
 201        else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
 202                return PM_TIMER_TICKS_TO_US(((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
 203        else
 204                return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2);
 205}
 206
 207/*
 208 * Callers should disable interrupts before the call and enable
 209 * interrupts after return.
 210 */
 211static void acpi_safe_halt(void)
 212{
 213        current_thread_info()->status &= ~TS_POLLING;
 214        /*
 215         * TS_POLLING-cleared state must be visible before we
 216         * test NEED_RESCHED:
 217         */
 218        smp_mb();
 219        if (!need_resched()) {
 220                safe_halt();
 221                local_irq_disable();
 222        }
 223        current_thread_info()->status |= TS_POLLING;
 224}
 225
 226#ifndef CONFIG_CPU_IDLE
 227
 228static void
 229acpi_processor_power_activate(struct acpi_processor *pr,
 230                              struct acpi_processor_cx *new)
 231{
 232        struct acpi_processor_cx *old;
 233
 234        if (!pr || !new)
 235                return;
 236
 237        old = pr->power.state;
 238
 239        if (old)
 240                old->promotion.count = 0;
 241        new->demotion.count = 0;
 242
 243        /* Cleanup from old state. */
 244        if (old) {
 245                switch (old->type) {
 246                case ACPI_STATE_C3:
 247                        /* Disable bus master reload */
 248                        if (new->type != ACPI_STATE_C3 && pr->flags.bm_check)
 249                                acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
 250                        break;
 251                }
 252        }
 253
 254        /* Prepare to use new state. */
 255        switch (new->type) {
 256        case ACPI_STATE_C3:
 257                /* Enable bus master reload */
 258                if (old->type != ACPI_STATE_C3 && pr->flags.bm_check)
 259                        acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
 260                break;
 261        }
 262
 263        pr->power.state = new;
 264
 265        return;
 266}
 267
 268static atomic_t c3_cpu_count;
 269
 270/* Common C-state entry for C2, C3, .. */
 271static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
 272{
 273        /* Don't trace irqs off for idle */
 274        stop_critical_timings();
 275        if (cstate->entry_method == ACPI_CSTATE_FFH) {
 276                /* Call into architectural FFH based C-state */
 277                acpi_processor_ffh_cstate_enter(cstate);
 278        } else {
 279                int unused;
 280                /* IO port based C-state */
 281                inb(cstate->address);
 282                /* Dummy wait op - must do something useless after P_LVL2 read
 283                   because chipsets cannot guarantee that STPCLK# signal
 284                   gets asserted in time to freeze execution properly. */
 285                unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
 286        }
 287        start_critical_timings();
 288}
 289#endif /* !CONFIG_CPU_IDLE */
 290
 291#ifdef ARCH_APICTIMER_STOPS_ON_C3
 292
 293/*
 294 * Some BIOS implementations switch to C3 in the published C2 state.
 295 * This seems to be a common problem on AMD boxen, but other vendors
 296 * are affected too. We pick the most conservative approach: we assume
 297 * that the local APIC stops in both C2 and C3.
 298 */
 299static void acpi_timer_check_state(int state, struct acpi_processor *pr,
 300                                   struct acpi_processor_cx *cx)
 301{
 302        struct acpi_processor_power *pwr = &pr->power;
 303        u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;
 304
 305        /*
 306         * Check, if one of the previous states already marked the lapic
 307         * unstable
 308         */
 309        if (pwr->timer_broadcast_on_state < state)
 310                return;
 311
 312        if (cx->type >= type)
 313                pr->power.timer_broadcast_on_state = state;
 314}
 315
 316static void acpi_propagate_timer_broadcast(struct acpi_processor *pr)
 317{
 318        unsigned long reason;
 319
 320        reason = pr->power.timer_broadcast_on_state < INT_MAX ?
 321                CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;
 322
 323        clockevents_notify(reason, &pr->id);
 324}
 325
 326/* Power(C) State timer broadcast control */
 327static void acpi_state_timer_broadcast(struct acpi_processor *pr,
 328                                       struct acpi_processor_cx *cx,
 329                                       int broadcast)
 330{
 331        int state = cx - pr->power.states;
 332
 333        if (state >= pr->power.timer_broadcast_on_state) {
 334                unsigned long reason;
 335
 336                reason = broadcast ?  CLOCK_EVT_NOTIFY_BROADCAST_ENTER :
 337                        CLOCK_EVT_NOTIFY_BROADCAST_EXIT;
 338                clockevents_notify(reason, &pr->id);
 339        }
 340}
 341
 342#else
 343
 344static void acpi_timer_check_state(int state, struct acpi_processor *pr,
 345                                   struct acpi_processor_cx *cstate) { }
 346static void acpi_propagate_timer_broadcast(struct acpi_processor *pr) { }
 347static void acpi_state_timer_broadcast(struct acpi_processor *pr,
 348                                       struct acpi_processor_cx *cx,
 349                                       int broadcast)
 350{
 351}
 352
 353#endif
 354
 355/*
 356 * Suspend / resume control
 357 */
 358static int acpi_idle_suspend;
 359
 360int acpi_processor_suspend(struct acpi_device * device, pm_message_t state)
 361{
 362        acpi_idle_suspend = 1;
 363        return 0;
 364}
 365
 366int acpi_processor_resume(struct acpi_device * device)
 367{
 368        acpi_idle_suspend = 0;
 369        return 0;
 370}
 371
 372#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
 373static int tsc_halts_in_c(int state)
 374{
 375        switch (boot_cpu_data.x86_vendor) {
 376        case X86_VENDOR_AMD:
 377                /*
 378                 * AMD Fam10h TSC will tick in all
 379                 * C/P/S0/S1 states when this bit is set.
 380                 */
 381                if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
 382                        return 0;
 383                /*FALL THROUGH*/
 384        case X86_VENDOR_INTEL:
 385                /* Several cases known where TSC halts in C2 too */
 386        default:
 387                return state > ACPI_STATE_C1;
 388        }
 389}
 390#endif
 391
 392#ifndef CONFIG_CPU_IDLE
 393static void acpi_processor_idle(void)
 394{
 395        struct acpi_processor *pr = NULL;
 396        struct acpi_processor_cx *cx = NULL;
 397        struct acpi_processor_cx *next_state = NULL;
 398        int sleep_ticks = 0;
 399        u32 t1, t2 = 0;
 400
 401        /*
 402         * Interrupts must be disabled during bus mastering calculations and
 403         * for C2/C3 transitions.
 404         */
 405        local_irq_disable();
 406
 407        pr = __get_cpu_var(processors);
 408        if (!pr) {
 409                local_irq_enable();
 410                return;
 411        }
 412
 413        /*
 414         * Check whether we truly need to go idle, or should
 415         * reschedule:
 416         */
 417        if (unlikely(need_resched())) {
 418                local_irq_enable();
 419                return;
 420        }
 421
 422        cx = pr->power.state;
 423        if (!cx || acpi_idle_suspend) {
 424                if (pm_idle_save) {
 425                        pm_idle_save(); /* enables IRQs */
 426                } else {
 427                        acpi_safe_halt();
 428                        local_irq_enable();
 429                }
 430
 431                return;
 432        }
 433
 434        /*
 435         * Check BM Activity
 436         * -----------------
 437         * Check for bus mastering activity (if required), record, and check
 438         * for demotion.
 439         */
 440        if (pr->flags.bm_check) {
 441                u32 bm_status = 0;
 442                unsigned long diff = jiffies - pr->power.bm_check_timestamp;
 443
 444                if (diff > 31)
 445                        diff = 31;
 446
 447                pr->power.bm_activity <<= diff;
 448
 449                acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
 450                if (bm_status) {
 451                        pr->power.bm_activity |= 0x1;
 452                        acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
 453                }
 454                /*
 455                 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
 456                 * the true state of bus mastering activity; forcing us to
 457                 * manually check the BMIDEA bit of each IDE channel.
 458                 */
 459                else if (errata.piix4.bmisx) {
 460                        if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
 461                            || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
 462                                pr->power.bm_activity |= 0x1;
 463                }
 464
 465                pr->power.bm_check_timestamp = jiffies;
 466
 467                /*
 468                 * If bus mastering is or was active this jiffy, demote
 469                 * to avoid a faulty transition.  Note that the processor
 470                 * won't enter a low-power state during this call (to this
 471                 * function) but should upon the next.
 472                 *
 473                 * TBD: A better policy might be to fallback to the demotion
  474                 *      state (use it for this quantum only) instead of
 475                 *      demoting -- and rely on duration as our sole demotion
 476                 *      qualification.  This may, however, introduce DMA
 477                 *      issues (e.g. floppy DMA transfer overrun/underrun).
 478                 */
 479                if ((pr->power.bm_activity & 0x1) &&
 480                    cx->demotion.threshold.bm) {
 481                        local_irq_enable();
 482                        next_state = cx->demotion.state;
 483                        goto end;
 484                }
 485        }
 486
 487#ifdef CONFIG_HOTPLUG_CPU
 488        /*
 489         * Check for P_LVL2_UP flag before entering C2 and above on
 490         * an SMP system. We do it here instead of doing it at _CST/P_LVL
 491         * detection phase, to work cleanly with logical CPU hotplug.
 492         */
 493        if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
 494            !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
 495                cx = &pr->power.states[ACPI_STATE_C1];
 496#endif
 497
 498        /*
 499         * Sleep:
 500         * ------
 501         * Invoke the current Cx state to put the processor to sleep.
 502         */
 503        if (cx->type == ACPI_STATE_C2 || cx->type == ACPI_STATE_C3) {
 504                current_thread_info()->status &= ~TS_POLLING;
 505                /*
 506                 * TS_POLLING-cleared state must be visible before we
 507                 * test NEED_RESCHED:
 508                 */
 509                smp_mb();
 510                if (need_resched()) {
 511                        current_thread_info()->status |= TS_POLLING;
 512                        local_irq_enable();
 513                        return;
 514                }
 515        }
 516
 517        switch (cx->type) {
 518
 519        case ACPI_STATE_C1:
 520                /*
 521                 * Invoke C1.
 522                 * Use the appropriate idle routine, the one that would
 523                 * be used without acpi C-states.
 524                 */
 525                if (pm_idle_save) {
 526                        pm_idle_save(); /* enables IRQs */
 527                } else {
 528                        acpi_safe_halt();
 529                        local_irq_enable();
 530                }
 531
 532                /*
 533                 * TBD: Can't get time duration while in C1, as resumes
 534                 *      go to an ISR rather than here.  Need to instrument
 535                 *      base interrupt handler.
 536                 *
 537                 * Note: the TSC better not stop in C1, sched_clock() will
 538                 *       skew otherwise.
 539                 */
 540                sleep_ticks = 0xFFFFFFFF;
 541
 542                break;
 543
 544        case ACPI_STATE_C2:
 545                /* Get start time (ticks) */
 546                t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
 547                /* Tell the scheduler that we are going deep-idle: */
 548                sched_clock_idle_sleep_event();
 549                /* Invoke C2 */
 550                acpi_state_timer_broadcast(pr, cx, 1);
 551                acpi_cstate_enter(cx);
 552                /* Get end time (ticks) */
 553                t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
 554
 555#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
 556                /* TSC halts in C2, so notify users */
 557                if (tsc_halts_in_c(ACPI_STATE_C2))
 558                        mark_tsc_unstable("possible TSC halt in C2");
 559#endif
 560                /* Compute time (ticks) that we were actually asleep */
 561                sleep_ticks = ticks_elapsed(t1, t2);
 562
 563                /* Tell the scheduler how much we idled: */
 564                sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
 565
 566                /* Re-enable interrupts */
 567                local_irq_enable();
 568                /* Do not account our idle-switching overhead: */
 569                sleep_ticks -= cx->latency_ticks + C2_OVERHEAD;
 570
 571                current_thread_info()->status |= TS_POLLING;
 572                acpi_state_timer_broadcast(pr, cx, 0);
 573                break;
 574
 575        case ACPI_STATE_C3:
 576                acpi_unlazy_tlb(smp_processor_id());
 577                /*
 578                 * Must be done before busmaster disable as we might
 579                 * need to access HPET !
 580                 */
 581                acpi_state_timer_broadcast(pr, cx, 1);
 582                /*
 583                 * disable bus master
 584                 * bm_check implies we need ARB_DIS
 585                 * !bm_check implies we need cache flush
 586                 * bm_control implies whether we can do ARB_DIS
 587                 *
 588                 * That leaves a case where bm_check is set and bm_control is
 589                 * not set. In that case we cannot do much, we enter C3
 590                 * without doing anything.
 591                 */
 592                if (pr->flags.bm_check && pr->flags.bm_control) {
 593                        if (atomic_inc_return(&c3_cpu_count) ==
 594                            num_online_cpus()) {
 595                                /*
 596                                 * All CPUs are trying to go to C3
 597                                 * Disable bus master arbitration
 598                                 */
 599                                acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
 600                        }
 601                } else if (!pr->flags.bm_check) {
 602                        /* SMP with no shared cache... Invalidate cache  */
 603                        ACPI_FLUSH_CPU_CACHE();
 604                }
 605
 606                /* Get start time (ticks) */
 607                t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
 608                /* Invoke C3 */
 609                /* Tell the scheduler that we are going deep-idle: */
 610                sched_clock_idle_sleep_event();
 611                acpi_cstate_enter(cx);
 612                /* Get end time (ticks) */
 613                t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
 614                if (pr->flags.bm_check && pr->flags.bm_control) {
 615                        /* Enable bus master arbitration */
 616                        atomic_dec(&c3_cpu_count);
 617                        acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
 618                }
 619
 620#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
 621                /* TSC halts in C3, so notify users */
 622                if (tsc_halts_in_c(ACPI_STATE_C3))
 623                        mark_tsc_unstable("TSC halts in C3");
 624#endif
 625                /* Compute time (ticks) that we were actually asleep */
 626                sleep_ticks = ticks_elapsed(t1, t2);
 627                /* Tell the scheduler how much we idled: */
 628                sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
 629
 630                /* Re-enable interrupts */
 631                local_irq_enable();
 632                /* Do not account our idle-switching overhead: */
 633                sleep_ticks -= cx->latency_ticks + C3_OVERHEAD;
 634
 635                current_thread_info()->status |= TS_POLLING;
 636                acpi_state_timer_broadcast(pr, cx, 0);
 637                break;
 638
 639        default:
 640                local_irq_enable();
 641                return;
 642        }
 643        cx->usage++;
 644        if ((cx->type != ACPI_STATE_C1) && (sleep_ticks > 0))
 645                cx->time += sleep_ticks;
 646
 647        next_state = pr->power.state;
 648
 649#ifdef CONFIG_HOTPLUG_CPU
 650        /* Don't do promotion/demotion */
 651        if ((cx->type == ACPI_STATE_C1) && (num_online_cpus() > 1) &&
 652            !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) {
 653                next_state = cx;
 654                goto end;
 655        }
 656#endif
 657
 658        /*
 659         * Promotion?
 660         * ----------
 661         * Track the number of longs (time asleep is greater than threshold)
 662         * and promote when the count threshold is reached.  Note that bus
 663         * mastering activity may prevent promotions.
 664         * Do not promote above max_cstate.
 665         */
 666        if (cx->promotion.state &&
 667            ((cx->promotion.state - pr->power.states) <= max_cstate)) {
 668                if (sleep_ticks > cx->promotion.threshold.ticks &&
 669                  cx->promotion.state->latency <=
 670                                pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) {
 671                        cx->promotion.count++;
 672                        cx->demotion.count = 0;
 673                        if (cx->promotion.count >=
 674                            cx->promotion.threshold.count) {
 675                                if (pr->flags.bm_check) {
 676                                        if (!
 677                                            (pr->power.bm_activity & cx->
 678                                             promotion.threshold.bm)) {
 679                                                next_state =
 680                                                    cx->promotion.state;
 681                                                goto end;
 682                                        }
 683                                } else {
 684                                        next_state = cx->promotion.state;
 685                                        goto end;
 686                                }
 687                        }
 688                }
 689        }
 690
 691        /*
 692         * Demotion?
 693         * ---------
 694         * Track the number of shorts (time asleep is less than time threshold)
 695         * and demote when the usage threshold is reached.
 696         */
 697        if (cx->demotion.state) {
 698                if (sleep_ticks < cx->demotion.threshold.ticks) {
 699                        cx->demotion.count++;
 700                        cx->promotion.count = 0;
 701                        if (cx->demotion.count >= cx->demotion.threshold.count) {
 702                                next_state = cx->demotion.state;
 703                                goto end;
 704                        }
 705                }
 706        }
 707
 708      end:
 709        /*
 710         * Demote if current state exceeds max_cstate
 711         * or if the latency of the current state is unacceptable
 712         */
 713        if ((pr->power.state - pr->power.states) > max_cstate ||
 714                pr->power.state->latency >
 715                                pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) {
 716                if (cx->demotion.state)
 717                        next_state = cx->demotion.state;
 718        }
 719
 720        /*
 721         * New Cx State?
 722         * -------------
 723         * If we're going to start using a new Cx state we must clean up
 724         * from the previous and prepare to use the new.
 725         */
 726        if (next_state != pr->power.state)
 727                acpi_processor_power_activate(pr, next_state);
 728}
 729
 730static int acpi_processor_set_power_policy(struct acpi_processor *pr)
 731{
 732        unsigned int i;
 733        unsigned int state_is_set = 0;
 734        struct acpi_processor_cx *lower = NULL;
 735        struct acpi_processor_cx *higher = NULL;
 736        struct acpi_processor_cx *cx;
 737
 738
 739        if (!pr)
 740                return -EINVAL;
 741
 742        /*
 743         * This function sets the default Cx state policy (OS idle handler).
 744         * Our scheme is to promote quickly to C2 but more conservatively
 745         * to C3.  We're favoring C2  for its characteristics of low latency
 746         * (quick response), good power savings, and ability to allow bus
 747         * mastering activity.  Note that the Cx state policy is completely
 748         * customizable and can be altered dynamically.
 749         */
 750
 751        /* startup state */
 752        for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
 753                cx = &pr->power.states[i];
 754                if (!cx->valid)
 755                        continue;
 756
 757                if (!state_is_set)
 758                        pr->power.state = cx;
 759                state_is_set++;
 760                break;
 761        }
 762
 763        if (!state_is_set)
 764                return -ENODEV;
 765
 766        /* demotion */
 767        for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
 768                cx = &pr->power.states[i];
 769                if (!cx->valid)
 770                        continue;
 771
 772                if (lower) {
 773                        cx->demotion.state = lower;
 774                        cx->demotion.threshold.ticks = cx->latency_ticks;
 775                        cx->demotion.threshold.count = 1;
 776                        if (cx->type == ACPI_STATE_C3)
 777                                cx->demotion.threshold.bm = bm_history;
 778                }
 779
 780                lower = cx;
 781        }
 782
 783        /* promotion */
 784        for (i = (ACPI_PROCESSOR_MAX_POWER - 1); i > 0; i--) {
 785                cx = &pr->power.states[i];
 786                if (!cx->valid)
 787                        continue;
 788
 789                if (higher) {
 790                        cx->promotion.state = higher;
 791                        cx->promotion.threshold.ticks = cx->latency_ticks;
 792                        if (cx->type >= ACPI_STATE_C2)
 793                                cx->promotion.threshold.count = 4;
 794                        else
 795                                cx->promotion.threshold.count = 10;
 796                        if (higher->type == ACPI_STATE_C3)
 797                                cx->promotion.threshold.bm = bm_history;
 798                }
 799
 800                higher = cx;
 801        }
 802
 803        return 0;
 804}
 805#endif /* !CONFIG_CPU_IDLE */
 806
 807static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
 808{
 809
 810        if (!pr)
 811                return -EINVAL;
 812
 813        if (!pr->pblk)
 814                return -ENODEV;
 815
 816        /* if info is obtained from pblk/fadt, type equals state */
 817        pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
 818        pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;
 819
 820#ifndef CONFIG_HOTPLUG_CPU
 821        /*
 822         * Check for P_LVL2_UP flag before entering C2 and above on
 823         * an SMP system.
 824         */
 825        if ((num_online_cpus() > 1) &&
 826            !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
 827                return -ENODEV;
 828#endif
 829
 830        /* determine C2 and C3 address from pblk */
 831        pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
 832        pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;
 833
 834        /* determine latencies from FADT */
 835        pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.C2latency;
 836        pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.C3latency;
 837
 838        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 839                          "lvl2[0x%08x] lvl3[0x%08x]\n",
 840                          pr->power.states[ACPI_STATE_C2].address,
 841                          pr->power.states[ACPI_STATE_C3].address));
 842
 843        return 0;
 844}
 845
 846static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
 847{
 848        if (!pr->power.states[ACPI_STATE_C1].valid) {
 849                /* set the first C-State to C1 */
 850                /* all processors need to support C1 */
 851                pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
 852                pr->power.states[ACPI_STATE_C1].valid = 1;
 853                pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT;
 854        }
 855        /* the C0 state only exists as a filler in our array */
 856        pr->power.states[ACPI_STATE_C0].valid = 1;
 857        return 0;
 858}
 859
 860static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
 861{
 862        acpi_status status = 0;
 863        acpi_integer count;
 864        int current_count;
 865        int i;
 866        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 867        union acpi_object *cst;
 868
 869
 870        if (nocst)
 871                return -ENODEV;
 872
 873        current_count = 0;
 874
 875        status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
 876        if (ACPI_FAILURE(status)) {
 877                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
 878                return -ENODEV;
 879        }
 880
 881        cst = buffer.pointer;
 882
 883        /* There must be at least 2 elements */
 884        if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
 885                printk(KERN_ERR PREFIX "not enough elements in _CST\n");
 886                status = -EFAULT;
 887                goto end;
 888        }
 889
 890        count = cst->package.elements[0].integer.value;
 891
 892        /* Validate number of power states. */
 893        if (count < 1 || count != cst->package.count - 1) {
 894                printk(KERN_ERR PREFIX "count given by _CST is not valid\n");
 895                status = -EFAULT;
 896                goto end;
 897        }
 898
 899        /* Tell driver that at least _CST is supported. */
 900        pr->flags.has_cst = 1;
 901
 902        for (i = 1; i <= count; i++) {
 903                union acpi_object *element;
 904                union acpi_object *obj;
 905                struct acpi_power_register *reg;
 906                struct acpi_processor_cx cx;
 907
 908                memset(&cx, 0, sizeof(cx));
 909
 910                element = &(cst->package.elements[i]);
 911                if (element->type != ACPI_TYPE_PACKAGE)
 912                        continue;
 913
 914                if (element->package.count != 4)
 915                        continue;
 916
 917                obj = &(element->package.elements[0]);
 918
 919                if (obj->type != ACPI_TYPE_BUFFER)
 920                        continue;
 921
 922                reg = (struct acpi_power_register *)obj->buffer.pointer;
 923
 924                if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
 925                    (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
 926                        continue;
 927
 928                /* There should be an easy way to extract an integer... */
 929                obj = &(element->package.elements[1]);
 930                if (obj->type != ACPI_TYPE_INTEGER)
 931                        continue;
 932
 933                cx.type = obj->integer.value;
 934                /*
 935                 * Some buggy BIOSes won't list C1 in _CST -
 936                 * Let acpi_processor_get_power_info_default() handle them later
 937                 */
 938                if (i == 1 && cx.type != ACPI_STATE_C1)
 939                        current_count++;
 940
 941                cx.address = reg->address;
 942                cx.index = current_count + 1;
 943
 944                cx.entry_method = ACPI_CSTATE_SYSTEMIO;
 945                if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
 946                        if (acpi_processor_ffh_cstate_probe
 947                                        (pr->id, &cx, reg) == 0) {
 948                                cx.entry_method = ACPI_CSTATE_FFH;
 949                        } else if (cx.type == ACPI_STATE_C1) {
 950                                /*
 951                                 * C1 is a special case where FIXED_HARDWARE
 952                                 * can be handled in non-MWAIT way as well.
 953                                 * In that case, save this _CST entry info.
 954                                 * Otherwise, ignore this info and continue.
 955                                 */
 956                                cx.entry_method = ACPI_CSTATE_HALT;
 957                                snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
 958                        } else {
 959                                continue;
 960                        }
 961                        if (cx.type == ACPI_STATE_C1 &&
 962                                        (idle_halt || idle_nomwait)) {
 963                                /*
 964                                 * In most cases the C1 space_id obtained from
 965                                 * _CST object is FIXED_HARDWARE access mode.
 966                                 * But when the option of idle=halt is added,
 967                                 * the entry_method type should be changed from
 968                                 * CSTATE_FFH to CSTATE_HALT.
 969                                 * When the option of idle=nomwait is added,
 970                                 * the C1 entry_method type should be
 971                                 * CSTATE_HALT.
 972                                 */
 973                                cx.entry_method = ACPI_CSTATE_HALT;
 974                                snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
 975                        }
 976                } else {
 977                        snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
 978                                 cx.address);
 979                }
 980
 981                if (cx.type == ACPI_STATE_C1) {
 982                        cx.valid = 1;
 983                }
 984
 985                obj = &(element->package.elements[2]);
 986                if (obj->type != ACPI_TYPE_INTEGER)
 987                        continue;
 988
 989                cx.latency = obj->integer.value;
 990
 991                obj = &(element->package.elements[3]);
 992                if (obj->type != ACPI_TYPE_INTEGER)
 993                        continue;
 994
 995                cx.power = obj->integer.value;
 996
 997                current_count++;
 998                memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));
 999
1000                /*
1001                 * We support total ACPI_PROCESSOR_MAX_POWER - 1
1002                 * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1)
1003                 */
1004                if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
1005                        printk(KERN_WARNING
1006                               "Limiting number of power states to max (%d)\n",
1007                               ACPI_PROCESSOR_MAX_POWER);
1008                        printk(KERN_WARNING
1009                               "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
1010                        break;
1011                }
1012        }
1013
1014        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
1015                          current_count));
1016
1017        /* Validate number of power states discovered */
1018        if (current_count < 2)
1019                status = -EFAULT;
1020
1021      end:
1022        kfree(buffer.pointer);
1023
1024        return status;
1025}
1026
1027static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
1028{
1029
1030        if (!cx->address)
1031                return;
1032
1033        /*
1034         * C2 latency must be less than or equal to 100
1035         * microseconds.
1036         */
1037        else if (cx->latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
1038                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
1039                                  "latency too large [%d]\n", cx->latency));
1040                return;
1041        }
1042
1043        /*
1044         * Otherwise we've met all of our C2 requirements.
1045         * Normalize the C2 latency to expidite policy
1046         */
1047        cx->valid = 1;
1048
1049#ifndef CONFIG_CPU_IDLE
1050        cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
1051#else
1052        cx->latency_ticks = cx->latency;
1053#endif
1054
1055        return;
1056}
1057
1058static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
1059                                           struct acpi_processor_cx *cx)
1060{
1061        static int bm_check_flag;
1062
1063
1064        if (!cx->address)
1065                return;
1066
1067        /*
1068         * C3 latency must be less than or equal to 1000
1069         * microseconds.
1070         */
1071        else if (cx->latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
1072                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
1073                                  "latency too large [%d]\n", cx->latency));
1074                return;
1075        }
1076
1077        /*
1078         * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
1079         * DMA transfers are used by any ISA device to avoid livelock.
1080         * Note that we could disable Type-F DMA (as recommended by
1081         * the erratum), but this is known to disrupt certain ISA
1082         * devices thus we take the conservative approach.
1083         */
1084        else if (errata.piix4.fdma) {
1085                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
1086                                  "C3 not supported on PIIX4 with Type-F DMA\n"));
1087                return;
1088        }
1089
1090        /* All the logic here assumes flags.bm_check is same across all CPUs */
1091        if (!bm_check_flag) {
1092                /* Determine whether bm_check is needed based on CPU  */
1093                acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
1094                bm_check_flag = pr->flags.bm_check;
1095        } else {
1096                pr->flags.bm_check = bm_check_flag;
1097        }
1098
1099        if (pr->flags.bm_check) {
1100                if (!pr->flags.bm_control) {
1101                        if (pr->flags.has_cst != 1) {
1102                                /* bus mastering control is necessary */
1103                                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
1104                                        "C3 support requires BM control\n"));
1105                                return;
1106                        } else {
1107                                /* Here we enter C3 without bus mastering */
1108                                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
1109                                        "C3 support without BM control\n"));
1110                        }
1111                }
1112        } else {
1113                /*
1114                 * WBINVD should be set in fadt, for C3 state to be
 1115         * supported when bm_check is not required.
1116                 */
1117                if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
1118                        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
1119                                          "Cache invalidation should work properly"
1120                                          " for C3 to be enabled on SMP systems\n"));
1121                        return;
1122                }
1123                acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
1124        }
1125
1126        /*
1127         * Otherwise we've met all of our C3 requirements.
 1128         * Normalize the C3 latency to expedite policy.  Enable
1129         * checking of bus mastering status (bm_check) so we can
1130         * use this in our C3 policy
1131         */
1132        cx->valid = 1;
1133
1134#ifndef CONFIG_CPU_IDLE
1135        cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
1136#else
1137        cx->latency_ticks = cx->latency;
1138#endif
1139
1140        return;
1141}
1142
1143static int acpi_processor_power_verify(struct acpi_processor *pr)
1144{
1145        unsigned int i;
1146        unsigned int working = 0;
1147
1148        pr->power.timer_broadcast_on_state = INT_MAX;
1149
1150        for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
1151                struct acpi_processor_cx *cx = &pr->power.states[i];
1152
1153                switch (cx->type) {
1154                case ACPI_STATE_C1:
1155                        cx->valid = 1;
1156                        break;
1157
1158                case ACPI_STATE_C2:
1159                        acpi_processor_power_verify_c2(cx);
1160                        if (cx->valid)
1161                                acpi_timer_check_state(i, pr, cx);
1162                        break;
1163
1164                case ACPI_STATE_C3:
1165                        acpi_processor_power_verify_c3(pr, cx);
1166                        if (cx->valid)
1167                                acpi_timer_check_state(i, pr, cx);
1168                        break;
1169                }
1170
1171                if (cx->valid)
1172                        working++;
1173        }
1174
1175        acpi_propagate_timer_broadcast(pr);
1176
1177        return (working);
1178}
1179
1180static int acpi_processor_get_power_info(struct acpi_processor *pr)
1181{
1182        unsigned int i;
1183        int result;
1184
1185
1186        /* NOTE: the idle thread may not be running while calling
1187         * this function */
1188
1189        /* Zero initialize all the C-states info. */
1190        memset(pr->power.states, 0, sizeof(pr->power.states));
1191
1192        result = acpi_processor_get_power_info_cst(pr);
1193        if (result == -ENODEV)
1194                result = acpi_processor_get_power_info_fadt(pr);
1195
1196        if (result)
1197                return result;
1198
1199        acpi_processor_get_power_info_default(pr);
1200
1201        pr->power.count = acpi_processor_power_verify(pr);
1202
1203#ifndef CONFIG_CPU_IDLE
1204        /*
1205         * Set Default Policy
1206         * ------------------
1207         * Now that we know which states are supported, set the default
1208         * policy.  Note that this policy can be changed dynamically
1209         * (e.g. encourage deeper sleeps to conserve battery life when
1210         * not on AC).
1211         */
1212        result = acpi_processor_set_power_policy(pr);
1213        if (result)
1214                return result;
1215#endif
1216
1217        /*
1218         * if one state of type C2 or C3 is available, mark this
1219         * CPU as being "idle manageable"
1220         */
1221        for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
1222                if (pr->power.states[i].valid) {
1223                        pr->power.count = i;
1224                        if (pr->power.states[i].type >= ACPI_STATE_C2)
1225                                pr->flags.power = 1;
1226                }
1227        }
1228
1229        return 0;
1230}
1231
1232static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
1233{
1234        struct acpi_processor *pr = seq->private;
1235        unsigned int i;
1236
1237
1238        if (!pr)
1239                goto end;
1240
1241        seq_printf(seq, "active state:            C%zd\n"
1242                   "max_cstate:              C%d\n"
1243                   "bus master activity:     %08x\n"
1244                   "maximum allowed latency: %d usec\n",
1245                   pr->power.state ? pr->power.state - pr->power.states : 0,
1246                   max_cstate, (unsigned)pr->power.bm_activity,
1247                   pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY));
1248
1249        seq_puts(seq, "states:\n");
1250
1251        for (i = 1; i <= pr->power.count; i++) {
1252                seq_printf(seq, "   %cC%d:                  ",
1253                           (&pr->power.states[i] ==
1254                            pr->power.state ? '*' : ' '), i);
1255
1256                if (!pr->power.states[i].valid) {
1257                        seq_puts(seq, "<not supported>\n");
1258                        continue;
1259                }
1260
1261                switch (pr->power.states[i].type) {
1262                case ACPI_STATE_C1:
1263                        seq_printf(seq, "type[C1] ");
1264                        break;
1265                case ACPI_STATE_C2:
1266                        seq_printf(seq, "type[C2] ");
1267                        break;
1268                case ACPI_STATE_C3:
1269                        seq_printf(seq, "type[C3] ");
1270                        break;
1271                default:
1272                        seq_printf(seq, "type[--] ");
1273                        break;
1274                }
1275
1276                if (pr->power.states[i].promotion.state)
1277                        seq_printf(seq, "promotion[C%zd] ",
1278                                   (pr->power.states[i].promotion.state -
1279                                    pr->power.states));
1280                else
1281                        seq_puts(seq, "promotion[--] ");
1282
1283                if (pr->power.states[i].demotion.state)
1284                        seq_printf(seq, "demotion[C%zd] ",
1285                                   (pr->power.states[i].demotion.state -
1286                                    pr->power.states));
1287                else
1288                        seq_puts(seq, "demotion[--] ");
1289
1290                seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n",
1291                           pr->power.states[i].latency,
1292                           pr->power.states[i].usage,
1293                           (unsigned long long)pr->power.states[i].time);
1294        }
1295
1296      end:
1297        return 0;
1298}
1299
1300static int acpi_processor_power_open_fs(struct inode *inode, struct file *file)
1301{
1302        return single_open(file, acpi_processor_power_seq_show,
1303                           PDE(inode)->data);
1304}
1305
1306static const struct file_operations acpi_processor_power_fops = {
1307        .owner = THIS_MODULE,
1308        .open = acpi_processor_power_open_fs,
1309        .read = seq_read,
1310        .llseek = seq_lseek,
1311        .release = single_release,
1312};
1313
1314#ifndef CONFIG_CPU_IDLE
1315
1316int acpi_processor_cst_has_changed(struct acpi_processor *pr)
1317{
1318        int result = 0;
1319
1320        if (boot_option_idle_override)
1321                return 0;
1322
1323        if (!pr)
1324                return -EINVAL;
1325
1326        if (nocst) {
1327                return -ENODEV;
1328        }
1329
1330        if (!pr->flags.power_setup_done)
1331                return -ENODEV;
1332
1333        /*
1334         * Fall back to the default idle loop, when pm_idle_save had
1335         * been initialized.
1336         */
1337        if (pm_idle_save) {
1338                pm_idle = pm_idle_save;
1339                /* Relies on interrupts forcing exit from idle. */
1340                synchronize_sched();
1341        }
1342
1343        pr->flags.power = 0;
1344        result = acpi_processor_get_power_info(pr);
1345        if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
1346                pm_idle = acpi_processor_idle;
1347
1348        return result;
1349}
1350
1351#ifdef CONFIG_SMP
1352static void smp_callback(void *v)
1353{
1354        /* we already woke the CPU up, nothing more to do */
1355}
1356
1357/*
1358 * This function gets called when a part of the kernel has a new latency
1359 * requirement.  This means we need to get all processors out of their C-state,
1360 * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
1361 * wakes them all right up.
1362 */
1363static int acpi_processor_latency_notify(struct notifier_block *b,
1364                unsigned long l, void *v)
1365{
1366        smp_call_function(smp_callback, NULL, 1);
1367        return NOTIFY_OK;
1368}
1369
1370static struct notifier_block acpi_processor_latency_notifier = {
1371        .notifier_call = acpi_processor_latency_notify,
1372};
1373
1374#endif
1375
1376#else /* CONFIG_CPU_IDLE */
1377
1378/**
1379 * acpi_idle_bm_check - checks if bus master activity was detected
1380 */
1381static int acpi_idle_bm_check(void)
1382{
1383        u32 bm_status = 0;
1384
1385        acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
1386        if (bm_status)
1387                acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
1388        /*
1389         * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
1390         * the true state of bus mastering activity; forcing us to
1391         * manually check the BMIDEA bit of each IDE channel.
1392         */
1393        else if (errata.piix4.bmisx) {
1394                if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
1395                    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
1396                        bm_status = 1;
1397        }
1398        return bm_status;
1399}
1400
1401/**
1402 * acpi_idle_update_bm_rld - updates the BM_RLD bit depending on target state
1403 * @pr: the processor
1404 * @target: the new target state
1405 */
1406static inline void acpi_idle_update_bm_rld(struct acpi_processor *pr,
1407                                           struct acpi_processor_cx *target)
1408{
1409        if (pr->flags.bm_rld_set && target->type != ACPI_STATE_C3) {
1410                acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
1411                pr->flags.bm_rld_set = 0;
1412        }
1413
1414        if (!pr->flags.bm_rld_set && target->type == ACPI_STATE_C3) {
1415                acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
1416                pr->flags.bm_rld_set = 1;
1417        }
1418}
1419
1420/**
1421 * acpi_idle_do_entry - a helper function that does C2 and C3 type entry
1422 * @cx: cstate data
1423 *
1424 * Caller disables interrupt before call and enables interrupt after return.
1425 */
1426static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
1427{
1428        /* Don't trace irqs off for idle */
1429        stop_critical_timings();
1430        if (cx->entry_method == ACPI_CSTATE_FFH) {
1431                /* Call into architectural FFH based C-state */
1432                acpi_processor_ffh_cstate_enter(cx);
1433        } else if (cx->entry_method == ACPI_CSTATE_HALT) {
1434                acpi_safe_halt();
1435        } else {
1436                int unused;
1437                /* IO port based C-state */
1438                inb(cx->address);
1439                /* Dummy wait op - must do something useless after P_LVL2 read
1440                   because chipsets cannot guarantee that the STPCLK# signal
1441                   gets asserted in time to freeze execution properly. */
1442                unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
1443        }
1444        start_critical_timings();
1445}
1446
1447/**
1448 * acpi_idle_enter_c1 - enters an ACPI C1 state-type
1449 * @dev: the target CPU
1450 * @state: the state data
1451 *
1452 * This is equivalent to the HALT instruction.
1453 */
1454static int acpi_idle_enter_c1(struct cpuidle_device *dev,
1455                              struct cpuidle_state *state)
1456{
1457        u32 t1, t2;
1458        struct acpi_processor *pr;
1459        struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
1460
1461        pr = __get_cpu_var(processors);
1462
1463        if (unlikely(!pr))
1464                return 0;
1465
1466        local_irq_disable();
1467
1468        /* Do not access any ACPI IO ports in suspend path */
1469        if (acpi_idle_suspend) {
1470                acpi_safe_halt();
1471                local_irq_enable();
1472                return 0;
1473        }
1474
1475        if (pr->flags.bm_check)
1476                acpi_idle_update_bm_rld(pr, cx);
1477
1478        t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
1479        acpi_idle_do_entry(cx);
1480        t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
1481
1482        local_irq_enable();
1483        cx->usage++;
1484
1485        return ticks_elapsed_in_us(t1, t2);
1486}
1487
1488/**
1489 * acpi_idle_enter_simple - enters an ACPI state without BM handling
1490 * @dev: the target CPU
1491 * @state: the state data
1492 */
1493static int acpi_idle_enter_simple(struct cpuidle_device *dev,
1494                                  struct cpuidle_state *state)
1495{
1496        struct acpi_processor *pr;
1497        struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
1498        u32 t1, t2;
1499        int sleep_ticks = 0;
1500
1501        pr = __get_cpu_var(processors);
1502
1503        if (unlikely(!pr))
1504                return 0;
1505
1506        if (acpi_idle_suspend)
1507                return acpi_idle_enter_c1(dev, state);
1508
1509        local_irq_disable();
1510        current_thread_info()->status &= ~TS_POLLING;
1511        /*
1512         * TS_POLLING-cleared state must be visible before we test
1513         * NEED_RESCHED:
1514         */
1515        smp_mb();
1516
1517        if (unlikely(need_resched())) {
1518                current_thread_info()->status |= TS_POLLING;
1519                local_irq_enable();
1520                return 0;
1521        }
1522
1523        /*
1524         * Must be done before busmaster disable as we might need to
1525         * access HPET!
1526         */
1527        acpi_state_timer_broadcast(pr, cx, 1);
1528
1529        if (pr->flags.bm_check)
1530                acpi_idle_update_bm_rld(pr, cx);
1531
1532        if (cx->type == ACPI_STATE_C3)
1533                ACPI_FLUSH_CPU_CACHE();
1534
1535        t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
1536        /* Tell the scheduler that we are going deep-idle: */
1537        sched_clock_idle_sleep_event();
1538        acpi_idle_do_entry(cx);
1539        t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
1540
1541#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
1542        /* TSC could halt in idle, so notify users */
1543        if (tsc_halts_in_c(cx->type))
1544                mark_tsc_unstable("TSC halts in idle");
1545#endif
1546        sleep_ticks = ticks_elapsed(t1, t2);
1547
1548        /* Tell the scheduler how much we idled: */
1549        sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
1550
1551        local_irq_enable();
1552        current_thread_info()->status |= TS_POLLING;
1553
1554        cx->usage++;
1555
1556        acpi_state_timer_broadcast(pr, cx, 0);
1557        cx->time += sleep_ticks;
1558        return ticks_elapsed_in_us(t1, t2);
1559}
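
The residency accounting above hinges on the two PM-timer reads bracketing the
C-state entry; ticks_elapsed() and ticks_elapsed_in_us(), defined earlier in
this file, turn the raw counter difference into time. Below is a minimal
editorial sketch of the wraparound-safe subtraction involved, assuming a
24-bit ACPI PM timer (the real helpers also handle the 32-bit timer case):

        /*
         * Sketch (assumes a 24-bit PM timer): the counter wraps, so a plain
         * t2 - t1 is only valid when no wrap occurred between the two reads.
         */
        static u32 example_pm_timer_ticks_elapsed(u32 t1, u32 t2)
        {
                if (t2 >= t1)
                        return t2 - t1;
                return (0x00FFFFFF - t1) + t2;  /* one wrap of a 24-bit counter */
        }
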
1560
1561static int c3_cpu_count;
1562static DEFINE_SPINLOCK(c3_lock);
1563
1564/**
1565 * acpi_idle_enter_bm - enters C3 with proper BM handling
1566 * @dev: the target CPU
1567 * @state: the state data
1568 *
1569 * If BM is detected, the deepest non-C3 idle state is entered instead.
1570 */
1571static int acpi_idle_enter_bm(struct cpuidle_device *dev,
1572                              struct cpuidle_state *state)
1573{
1574        struct acpi_processor *pr;
1575        struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
1576        u32 t1, t2;
1577        int sleep_ticks = 0;
1578
1579        pr = __get_cpu_var(processors);
1580
1581        if (unlikely(!pr))
1582                return 0;
1583
1584        if (acpi_idle_suspend)
1585                return acpi_idle_enter_c1(dev, state);
1586
1587        if (acpi_idle_bm_check()) {
1588                if (dev->safe_state) {
1589                        dev->last_state = dev->safe_state;
1590                        return dev->safe_state->enter(dev, dev->safe_state);
1591                } else {
1592                        local_irq_disable();
1593                        acpi_safe_halt();
1594                        local_irq_enable();
1595                        return 0;
1596                }
1597        }
1598
1599        local_irq_disable();
1600        current_thread_info()->status &= ~TS_POLLING;
1601        /*
1602         * TS_POLLING-cleared state must be visible before we test
1603         * NEED_RESCHED:
1604         */
1605        smp_mb();
1606
1607        if (unlikely(need_resched())) {
1608                current_thread_info()->status |= TS_POLLING;
1609                local_irq_enable();
1610                return 0;
1611        }
1612
1613        acpi_unlazy_tlb(smp_processor_id());
1614
1615        /* Tell the scheduler that we are going deep-idle: */
1616        sched_clock_idle_sleep_event();
1617        /*
1618         * Must be done before busmaster disable as we might need to
1619         * access HPET!
1620         */
1621        acpi_state_timer_broadcast(pr, cx, 1);
1622
1623        acpi_idle_update_bm_rld(pr, cx);
1624
1625        /*
1626         * disable bus master
1627         * bm_check implies we need ARB_DIS
1628         * !bm_check implies we need cache flush
1629         * bm_control determines whether we can do ARB_DIS
1630         *
1631         * That leaves a case where bm_check is set and bm_control is
1632         * not set. In that case we cannot do much; we enter C3
1633         * without doing anything.
1634         */
1635        if (pr->flags.bm_check && pr->flags.bm_control) {
1636                spin_lock(&c3_lock);
1637                c3_cpu_count++;
1638                /* Disable bus master arbitration when all CPUs are in C3 */
1639                if (c3_cpu_count == num_online_cpus())
1640                        acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
1641                spin_unlock(&c3_lock);
1642        } else if (!pr->flags.bm_check) {
1643                ACPI_FLUSH_CPU_CACHE();
1644        }
1645
1646        t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
1647        acpi_idle_do_entry(cx);
1648        t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
1649
1650        /* Re-enable bus master arbitration */
1651        if (pr->flags.bm_check && pr->flags.bm_control) {
1652                spin_lock(&c3_lock);
1653                acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
1654                c3_cpu_count--;
1655                spin_unlock(&c3_lock);
1656        }
1657
1658#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
1659        /* TSC could halt in idle, so notify users */
1660        if (tsc_halts_in_c(ACPI_STATE_C3))
1661                mark_tsc_unstable("TSC halts in idle");
1662#endif
1663        sleep_ticks = ticks_elapsed(t1, t2);
1664        /* Tell the scheduler how much we idled: */
1665        sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
1666
1667        local_irq_enable();
1668        current_thread_info()->status |= TS_POLLING;
1669
1670        cx->usage++;
1671
1672        acpi_state_timer_broadcast(pr, cx, 0);
1673        cx->time += sleep_ticks;
1674        return ticks_elapsed_in_us(t1, t2);
1675}
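
Note how the c3_lock sections above are balanced: every path that takes the
spinlock releases it before the function can return, both when bumping
c3_cpu_count on the way into C3 and when re-enabling arbitration afterwards.
A condensed editorial sketch of that pattern (the helper name is hypothetical):

        /*
         * Sketch: the balanced lock/unlock pattern used around the ARB_DIS
         * bookkeeping above.  No return is reachable while c3_lock is held.
         */
        static void example_arb_dis_update(int entering_c3)
        {
                spin_lock(&c3_lock);
                if (entering_c3) {
                        c3_cpu_count++;
                        /* Disable bus master arbitration once every CPU is in C3 */
                        if (c3_cpu_count == num_online_cpus())
                                acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
                } else {
                        acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
                        c3_cpu_count--;
                }
                spin_unlock(&c3_lock);
        }
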
1676
1677struct cpuidle_driver acpi_idle_driver = {
1678        .name =                "acpi_idle",
1679        .owner =        THIS_MODULE,
1680};
1681
1682/**
1683 * acpi_processor_setup_cpuidle - prepares and configures CPUIDLE
1684 * @pr: the ACPI processor
1685 */
1686static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
1687{
1688        int i, count = CPUIDLE_DRIVER_STATE_START;
1689        struct acpi_processor_cx *cx;
1690        struct cpuidle_state *state;
1691        struct cpuidle_device *dev = &pr->power.dev;
1692
1693        if (!pr->flags.power_setup_done)
1694                return -EINVAL;
1695
1696        if (pr->flags.power == 0) {
1697                return -EINVAL;
1698        }
1699
1700        dev->cpu = pr->id;
1701        for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
1702                dev->states[i].name[0] = '\0';
1703                dev->states[i].desc[0] = '\0';
1704        }
1705
1706        for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
1707                cx = &pr->power.states[i];
1708                state = &dev->states[count];
1709
1710                if (!cx->valid)
1711                        continue;
1712
1713#ifdef CONFIG_HOTPLUG_CPU
1714                if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
1715                    !pr->flags.has_cst &&
1716                    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
1717                        continue;
1718#endif
1719                cpuidle_set_statedata(state, cx);
1720
1721                snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
1722                strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
1723                state->exit_latency = cx->latency;
1724                state->target_residency = cx->latency * latency_factor;
1725                state->power_usage = cx->power;
1726
1727                state->flags = 0;
1728                switch (cx->type) {
1729                        case ACPI_STATE_C1:
1730                        state->flags |= CPUIDLE_FLAG_SHALLOW;
1731                        if (cx->entry_method == ACPI_CSTATE_FFH)
1732                                state->flags |= CPUIDLE_FLAG_TIME_VALID;
1733
1734                        state->enter = acpi_idle_enter_c1;
1735                        dev->safe_state = state;
1736                        break;
1737
1738                        case ACPI_STATE_C2:
1739                        state->flags |= CPUIDLE_FLAG_BALANCED;
1740                        state->flags |= CPUIDLE_FLAG_TIME_VALID;
1741                        state->enter = acpi_idle_enter_simple;
1742                        dev->safe_state = state;
1743                        break;
1744
1745                        case ACPI_STATE_C3:
1746                        state->flags |= CPUIDLE_FLAG_DEEP;
1747                        state->flags |= CPUIDLE_FLAG_TIME_VALID;
1748                        state->flags |= CPUIDLE_FLAG_CHECK_BM;
1749                        state->enter = pr->flags.bm_check ?
1750                                        acpi_idle_enter_bm :
1751                                        acpi_idle_enter_simple;
1752                        break;
1753                }
1754
1755                count++;
1756                if (count == CPUIDLE_STATE_MAX)
1757                        break;
1758        }
1759
1760        dev->state_count = count;
1761
1762        if (!count)
1763                return -EINVAL;
1764
1765        return 0;
1766}
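
Filling in dev->states is only half of the job; the device still has to be
handed to the cpuidle core before the states become usable. The following is a
minimal editorial sketch of that sequence, loosely mirroring the
CONFIG_CPU_IDLE path of acpi_processor_power_init() below (the helper name is
hypothetical, and the error handling here is illustrative):

        /*
         * Sketch: configure the per-CPU cpuidle device and register it with
         * the cpuidle core.
         */
        static int example_register_idle_device(struct acpi_processor *pr)
        {
                if (acpi_processor_setup_cpuidle(pr))
                        return -EINVAL;
                if (cpuidle_register_device(&pr->power.dev))
                        return -EIO;
                return 0;
        }
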
1767
1768int acpi_processor_cst_has_changed(struct acpi_processor *pr)
1769{
1770        int ret = 0;
1771
1772        if (boot_option_idle_override)
1773                return 0;
1774
1775        if (!pr)
1776                return -EINVAL;
1777
1778        if (nocst) {
1779                return -ENODEV;
1780        }
1781
1782        if (!pr->flags.power_setup_done)
1783                return -ENODEV;
1784
1785        cpuidle_pause_and_lock();
1786        cpuidle_disable_device(&pr->power.dev);
1787        acpi_processor_get_power_info(pr);
1788        if (pr->flags.power) {
1789                acpi_processor_setup_cpuidle(pr);
1790                ret = cpuidle_enable_device(&pr->power.dev);
1791        }
1792        cpuidle_resume_and_unlock();
1793
1794        return ret;
1795}
1796
1797#endif /* CONFIG_CPU_IDLE */
1798
1799int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
1800                              struct acpi_device *device)
1801{
1802        acpi_status status = 0;
1803        static int first_run;
1804        struct proc_dir_entry *entry = NULL;
1805        unsigned int i;
1806
1807        if (boot_option_idle_override)
1808                return 0;
1809
1810        if (!first_run) {
1811                if (idle_halt) {
1812                        /*
1813                         * When the "idle=halt" boot option is specified,
1814                         * halt is used for CPU idle.
1815                         * In that case C2/C3 states are meaningless, so
1816                         * max_cstate is set to one.
1817                         */
1818                        max_cstate = 1;
1819                }
1820                dmi_check_system(processor_power_dmi_table);
1821                max_cstate = acpi_processor_cstate_check(max_cstate);
1822                if (max_cstate < ACPI_C_STATES_MAX)
1823                        printk(KERN_NOTICE
1824                               "ACPI: processor limited to max C-state %d\n",
1825                               max_cstate);
1826                first_run++;
1827#if !defined(CONFIG_CPU_IDLE) && defined(CONFIG_SMP)
1828                pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY,
1829                                &acpi_processor_latency_notifier);
1830#endif
1831        }
1832
1833        if (!pr)
1834                return -EINVAL;
1835
1836        if (acpi_gbl_FADT.cst_control && !nocst) {
1837                status =
1838                    acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8);
1839                if (ACPI_FAILURE(status)) {
1840                        ACPI_EXCEPTION((AE_INFO, status,
1841                                        "Notifying BIOS of _CST ability failed"));
1842                }
1843        }
1844
1845        acpi_processor_get_power_info(pr);
1846        pr->flags.power_setup_done = 1;
1847
1848        /*
1849         * Install the idle handler if processor power management is supported.
1850         * Note that the previously set idle handler will be used on
1851         * platforms that only support C1.
1852         */
1853        if (pr->flags.power) {
1854#ifdef CONFIG_CPU_IDLE
1855                acpi_processor_setup_cpuidle(pr);
1856                if (cpuidle_register_device(&pr->power.dev))
1857                        return -EIO;
1858#endif
1859
1860                printk(KERN_INFO PREFIX "CPU%d (power states:", pr->id);
1861                for (i = 1; i <= pr->power.count; i++)
1862                        if (pr->power.states[i].valid)
1863                                printk(" C%d[C%d]", i,
1864                                       pr->power.states[i].type);
1865                printk(")\n");
1866
1867#ifndef CONFIG_CPU_IDLE
1868                if (pr->id == 0) {
1869                        pm_idle_save = pm_idle;
1870                        pm_idle = acpi_processor_idle;
1871                }
1872#endif
1873        }
1874
1875        /* 'power' [R] */
1876        entry = proc_create_data(ACPI_PROCESSOR_FILE_POWER,
1877                                 S_IRUGO, acpi_device_dir(device),
1878                                 &acpi_processor_power_fops,
1879                                 acpi_driver_data(device));
1880        if (!entry)
1881                return -EIO;
1882        return 0;
1883}
1884
1885int acpi_processor_power_exit(struct acpi_processor *pr,
1886                              struct acpi_device *device)
1887{
1888        if (boot_option_idle_override)
1889                return 0;
1890
1891#ifdef CONFIG_CPU_IDLE
1892        cpuidle_unregister_device(&pr->power.dev);
1893#endif
1894        pr->flags.power_setup_done = 0;
1895
1896        if (acpi_device_dir(device))
1897                remove_proc_entry(ACPI_PROCESSOR_FILE_POWER,
1898                                  acpi_device_dir(device));
1899
1900#ifndef CONFIG_CPU_IDLE
1901
1902        /* Unregister the idle handler when processor #0 is removed. */
1903        if (pr->id == 0) {
1904                if (pm_idle_save)
1905                        pm_idle = pm_idle_save;
1906
1907                /*
1908                 * We are about to unload the current idle thread pm callback
1909                 * (pm_idle); wait for all processors to update cached/local
1910                 * copies of pm_idle before proceeding.
1911                 */
1912                cpu_idle_wait();
1913#ifdef CONFIG_SMP
1914                pm_qos_remove_notifier(PM_QOS_CPU_DMA_LATENCY,
1915                                &acpi_processor_latency_notifier);
1916#endif
1917        }
1918#endif
1919
1920        return 0;
1921}