Showing error 1233

User: Jiri Slaby
Error type: Leaving function in locked state
Error type description: A lock acquired in a function is not released on every path out of the function, so the lock leaks
File location: arch/x86/kernel/smpboot.c
Line in file: 1415
Project: Linux Kernel
Project version: 2.6.28
Tools: Stanse (1.2)
Entered: 2012-05-21 20:30:05 UTC
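
For reference, a minimal sketch of this error class (hypothetical code, not taken from the file below; the lock, function, and variable names are illustrative): a function acquires a lock and then returns on one path without releasing it, so that path leaks the lock.

    #include <linux/spinlock.h>
    #include <linux/errno.h>

    static DEFINE_SPINLOCK(example_lock);   /* hypothetical lock */
    static int shared_state;                /* hypothetical shared data */

    static int example_update(int val)
    {
            spin_lock(&example_lock);
            if (val < 0)
                    return -EINVAL;          /* BUG: returns with example_lock held */
            shared_state = val;
            spin_unlock(&example_lock);      /* only this path releases the lock */
            return 0;
    }

A checker such as Stanse pairs lock/unlock calls along every control-flow path; the early return above exits the function in a locked state.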


Source:

   1/*
   2 *        x86 SMP booting functions
   3 *
   4 *        (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
   5 *        (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
   6 *        Copyright 2001 Andi Kleen, SuSE Labs.
   7 *
   8 *        Much of the core SMP work is based on previous work by Thomas Radke, to
   9 *        whom a great many thanks are extended.
  10 *
  11 *        Thanks to Intel for making available several different Pentium,
  12 *        Pentium Pro and Pentium-II/Xeon MP machines.
  13 *        Original development of Linux SMP code supported by Caldera.
  14 *
  15 *        This code is released under the GNU General Public License version 2 or
  16 *        later.
  17 *
  18 *        Fixes
  19 *                Felix Koop        :        NR_CPUS used properly
  20 *                Jose Renau        :        Handle single CPU case.
  21 *                Alan Cox        :        By repeated request 8) - Total BogoMIPS report.
  22 *                Greg Wright        :        Fix for kernel stacks panic.
  23 *                Erich Boleyn        :        MP v1.4 and additional changes.
  24 *        Matthias Sattler        :        Changes for 2.1 kernel map.
  25 *        Michel Lespinasse        :        Changes for 2.1 kernel map.
  26 *        Michael Chastain        :        Change trampoline.S to gnu as.
  27 *                Alan Cox        :        Dumb bug: 'B' step PPro's are fine
  28 *                Ingo Molnar        :        Added APIC timers, based on code
  29 *                                        from Jose Renau
  30 *                Ingo Molnar        :        various cleanups and rewrites
  31 *                Tigran Aivazian        :        fixed "0.00 in /proc/uptime on SMP" bug.
  32 *        Maciej W. Rozycki        :        Bits for genuine 82489DX APICs
  33 *        Andi Kleen                :        Changed for SMP boot into long mode.
  34 *                Martin J. Bligh        :         Added support for multi-quad systems
  35 *                Dave Jones        :        Report invalid combinations of Athlon CPUs.
  36 *                Rusty Russell        :        Hacked into shape for new "hotplug" boot process.
  37 *      Andi Kleen              :       Converted to new state machine.
  38 *        Ashok Raj                :         CPU hotplug support
  39 *        Glauber Costa                :        i386 and x86_64 integration
  40 */
  41
  42#include <linux/init.h>
  43#include <linux/smp.h>
  44#include <linux/module.h>
  45#include <linux/sched.h>
  46#include <linux/percpu.h>
  47#include <linux/bootmem.h>
  48#include <linux/err.h>
  49#include <linux/nmi.h>
  50
  51#include <asm/acpi.h>
  52#include <asm/desc.h>
  53#include <asm/nmi.h>
  54#include <asm/irq.h>
  55#include <asm/idle.h>
  56#include <asm/smp.h>
  57#include <asm/trampoline.h>
  58#include <asm/cpu.h>
  59#include <asm/numa.h>
  60#include <asm/pgtable.h>
  61#include <asm/tlbflush.h>
  62#include <asm/mtrr.h>
  63#include <asm/vmi.h>
  64#include <asm/genapic.h>
  65#include <linux/mc146818rtc.h>
  66
  67#include <mach_apic.h>
  68#include <mach_wakecpu.h>
  69#include <smpboot_hooks.h>
  70
  71#ifdef CONFIG_X86_32
  72u8 apicid_2_node[MAX_APICID];
  73static int low_mappings;
  74#endif
  75
  76/* State of each CPU */
  77DEFINE_PER_CPU(int, cpu_state) = { 0 };
  78
   79/* Store all idle threads; these can be reused instead of creating
   80 * a new thread. Also avoids complicated thread-destroy functionality
   81 * for idle threads.
   82 */
  83#ifdef CONFIG_HOTPLUG_CPU
  84/*
  85 * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is
  86 * removed after init for !CONFIG_HOTPLUG_CPU.
  87 */
  88static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
  89#define get_idle_for_cpu(x)      (per_cpu(idle_thread_array, x))
  90#define set_idle_for_cpu(x, p)   (per_cpu(idle_thread_array, x) = (p))
  91#else
  92static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
  93#define get_idle_for_cpu(x)      (idle_thread_array[(x)])
  94#define set_idle_for_cpu(x, p)   (idle_thread_array[(x)] = (p))
  95#endif
  96
  97/* Number of siblings per CPU package */
  98int smp_num_siblings = 1;
  99EXPORT_SYMBOL(smp_num_siblings);
 100
 101/* Last level cache ID of each logical CPU */
 102DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID;
 103
 104/* bitmap of online cpus */
 105cpumask_t cpu_online_map __read_mostly;
 106EXPORT_SYMBOL(cpu_online_map);
 107
 108cpumask_t cpu_callin_map;
 109cpumask_t cpu_callout_map;
 110cpumask_t cpu_possible_map;
 111EXPORT_SYMBOL(cpu_possible_map);
 112
 113/* representing HT siblings of each logical CPU */
 114DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
 115EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
 116
 117/* representing HT and core siblings of each logical CPU */
 118DEFINE_PER_CPU(cpumask_t, cpu_core_map);
 119EXPORT_PER_CPU_SYMBOL(cpu_core_map);
 120
 121/* Per CPU bogomips and other parameters */
 122DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
 123EXPORT_PER_CPU_SYMBOL(cpu_info);
 124
 125static atomic_t init_deasserted;
 126
 127
 128/* representing cpus for which sibling maps can be computed */
 129static cpumask_t cpu_sibling_setup_map;
 130
 131/* Set if we find a B stepping CPU */
 132static int __cpuinitdata smp_b_stepping;
 133
 134#if defined(CONFIG_NUMA) && defined(CONFIG_X86_32)
 135
 136/* which logical CPUs are on which nodes */
 137cpumask_t node_to_cpumask_map[MAX_NUMNODES] __read_mostly =
 138                                { [0 ... MAX_NUMNODES-1] = CPU_MASK_NONE };
 139EXPORT_SYMBOL(node_to_cpumask_map);
 140/* which node each logical CPU is on */
 141int cpu_to_node_map[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 };
 142EXPORT_SYMBOL(cpu_to_node_map);
 143
 144/* set up a mapping between cpu and node. */
 145static void map_cpu_to_node(int cpu, int node)
 146{
 147        printk(KERN_INFO "Mapping cpu %d to node %d\n", cpu, node);
 148        cpu_set(cpu, node_to_cpumask_map[node]);
 149        cpu_to_node_map[cpu] = node;
 150}
 151
 152/* undo a mapping between cpu and node. */
 153static void unmap_cpu_to_node(int cpu)
 154{
 155        int node;
 156
 157        printk(KERN_INFO "Unmapping cpu %d from all nodes\n", cpu);
 158        for (node = 0; node < MAX_NUMNODES; node++)
 159                cpu_clear(cpu, node_to_cpumask_map[node]);
 160        cpu_to_node_map[cpu] = 0;
 161}
 162#else /* !(CONFIG_NUMA && CONFIG_X86_32) */
 163#define map_cpu_to_node(cpu, node)        ({})
 164#define unmap_cpu_to_node(cpu)        ({})
 165#endif
 166
 167#ifdef CONFIG_X86_32
 168static int boot_cpu_logical_apicid;
 169
 170u8 cpu_2_logical_apicid[NR_CPUS] __read_mostly =
 171                                        { [0 ... NR_CPUS-1] = BAD_APICID };
 172
 173static void map_cpu_to_logical_apicid(void)
 174{
 175        int cpu = smp_processor_id();
 176        int apicid = logical_smp_processor_id();
 177        int node = apicid_to_node(apicid);
 178
 179        if (!node_online(node))
 180                node = first_online_node;
 181
 182        cpu_2_logical_apicid[cpu] = apicid;
 183        map_cpu_to_node(cpu, node);
 184}
 185
 186void numa_remove_cpu(int cpu)
 187{
 188        cpu_2_logical_apicid[cpu] = BAD_APICID;
 189        unmap_cpu_to_node(cpu);
 190}
 191#else
 192#define map_cpu_to_logical_apicid()  do {} while (0)
 193#endif
 194
 195/*
 196 * Report back to the Boot Processor.
 197 * Running on AP.
 198 */
 199static void __cpuinit smp_callin(void)
 200{
 201        int cpuid, phys_id;
 202        unsigned long timeout;
 203
 204        /*
 205         * If waken up by an INIT in an 82489DX configuration
 206         * we may get here before an INIT-deassert IPI reaches
 207         * our local APIC.  We have to wait for the IPI or we'll
 208         * lock up on an APIC access.
 209         */
 210        wait_for_init_deassert(&init_deasserted);
 211
 212        /*
 213         * (This works even if the APIC is not enabled.)
 214         */
 215        phys_id = read_apic_id();
 216        cpuid = smp_processor_id();
 217        if (cpu_isset(cpuid, cpu_callin_map)) {
 218                panic("%s: phys CPU#%d, CPU#%d already present??\n", __func__,
 219                                        phys_id, cpuid);
 220        }
 221        pr_debug("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);
 222
 223        /*
 224         * STARTUP IPIs are fragile beasts as they might sometimes
 225         * trigger some glue motherboard logic. Complete APIC bus
 226         * silence for 1 second, this overestimates the time the
 227         * boot CPU is spending to send the up to 2 STARTUP IPIs
 228         * by a factor of two. This should be enough.
 229         */
 230
 231        /*
 232         * Waiting 2s total for startup (udelay is not yet working)
 233         */
 234        timeout = jiffies + 2*HZ;
 235        while (time_before(jiffies, timeout)) {
 236                /*
  237                 * Has the boot CPU finished its STARTUP sequence?
 238                 */
 239                if (cpu_isset(cpuid, cpu_callout_map))
 240                        break;
 241                cpu_relax();
 242        }
 243
 244        if (!time_before(jiffies, timeout)) {
 245                panic("%s: CPU%d started up but did not get a callout!\n",
 246                      __func__, cpuid);
 247        }
 248
 249        /*
 250         * the boot CPU has finished the init stage and is spinning
 251         * on callin_map until we finish. We are free to set up this
 252         * CPU, first the APIC. (this is probably redundant on most
 253         * boards)
 254         */
 255
 256        pr_debug("CALLIN, before setup_local_APIC().\n");
 257        smp_callin_clear_local_apic();
 258        setup_local_APIC();
 259        end_local_APIC_setup();
 260        map_cpu_to_logical_apicid();
 261
 262        notify_cpu_starting(cpuid);
 263        /*
 264         * Get our bogomips.
 265         *
 266         * Need to enable IRQs because it can take longer and then
 267         * the NMI watchdog might kill us.
 268         */
 269        local_irq_enable();
 270        calibrate_delay();
 271        local_irq_disable();
 272        pr_debug("Stack at about %p\n", &cpuid);
 273
 274        /*
 275         * Save our processor parameters
 276         */
 277        smp_store_cpu_info(cpuid);
 278
 279        /*
 280         * Allow the master to continue.
 281         */
 282        cpu_set(cpuid, cpu_callin_map);
 283}
 284
 285static int __cpuinitdata unsafe_smp;
 286
 287/*
 288 * Activate a secondary processor.
 289 */
 290static void __cpuinit start_secondary(void *unused)
 291{
 292        /*
  293         * Don't put *anything* before cpu_init(); SMP booting is so
 294         * fragile that we want to limit the things done here to the
 295         * most necessary things.
 296         */
 297        vmi_bringup();
 298        cpu_init();
 299        preempt_disable();
 300        smp_callin();
 301
 302        /* otherwise gcc will move up smp_processor_id before the cpu_init */
 303        barrier();
 304        /*
 305         * Check TSC synchronization with the BP:
 306         */
 307        check_tsc_sync_target();
 308
 309        if (nmi_watchdog == NMI_IO_APIC) {
 310                disable_8259A_irq(0);
 311                enable_NMI_through_LVT0();
 312                enable_8259A_irq(0);
 313        }
 314
 315#ifdef CONFIG_X86_32
 316        while (low_mappings)
 317                cpu_relax();
 318        __flush_tlb_all();
 319#endif
 320
 321        /* This must be done before setting cpu_online_map */
 322        set_cpu_sibling_map(raw_smp_processor_id());
 323        wmb();
 324
 325        /*
 326         * We need to hold call_lock, so there is no inconsistency
  327         * between the time smp_call_function() determines the number of
 328         * IPI recipients, and the time when the determination is made
 329         * for which cpus receive the IPI. Holding this
 330         * lock helps us to not include this cpu in a currently in progress
 331         * smp_call_function().
 332         *
  333         * We need to hold vector_lock so that the set of online cpus
 334         * does not change while we are assigning vectors to cpus.  Holding
 335         * this lock ensures we don't half assign or remove an irq from a cpu.
 336         */
 337        ipi_call_lock();
 338        lock_vector_lock();
 339        __setup_vector_irq(smp_processor_id());
 340        cpu_set(smp_processor_id(), cpu_online_map);
 341        unlock_vector_lock();
 342        ipi_call_unlock();
 343        per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
 344
 345        /* enable local interrupts */
 346        local_irq_enable();
 347
 348        setup_secondary_clock();
 349
 350        wmb();
 351        cpu_idle();
 352}
 353
 354static void __cpuinit smp_apply_quirks(struct cpuinfo_x86 *c)
 355{
 356        /*
 357         * Mask B, Pentium, but not Pentium MMX
 358         */
 359        if (c->x86_vendor == X86_VENDOR_INTEL &&
 360            c->x86 == 5 &&
 361            c->x86_mask >= 1 && c->x86_mask <= 4 &&
 362            c->x86_model <= 3)
 363                /*
 364                 * Remember we have B step Pentia with bugs
 365                 */
 366                smp_b_stepping = 1;
 367
 368        /*
 369         * Certain Athlons might work (for various values of 'work') in SMP
 370         * but they are not certified as MP capable.
 371         */
 372        if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) {
 373
 374                if (num_possible_cpus() == 1)
 375                        goto valid_k7;
 376
 377                /* Athlon 660/661 is valid. */
 378                if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
 379                    (c->x86_mask == 1)))
 380                        goto valid_k7;
 381
 382                /* Duron 670 is valid */
 383                if ((c->x86_model == 7) && (c->x86_mask == 0))
 384                        goto valid_k7;
 385
 386                /*
  387                 * Athlon 662, Duron 671, and Athlons above model 7 have the
  388                 * capability bit. It's worth noting that the A5 stepping (662)
  389                 * of some Athlon XPs has the MP bit set.
 390                 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
 391                 * more.
 392                 */
 393                if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
 394                    ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
 395                     (c->x86_model > 7))
 396                        if (cpu_has_mp)
 397                                goto valid_k7;
 398
 399                /* If we get here, not a certified SMP capable AMD system. */
 400                unsafe_smp = 1;
 401        }
 402
 403valid_k7:
 404        ;
 405}
 406
 407static void __cpuinit smp_checks(void)
 408{
 409        if (smp_b_stepping)
  410                printk(KERN_WARNING "WARNING: SMP operation may be unreliable "
 411                                    "with B stepping processors.\n");
 412
 413        /*
  414         * Don't taint if we are running an SMP kernel on a single
  415         * non-MP-approved Athlon
 416         */
 417        if (unsafe_smp && num_online_cpus() > 1) {
  418                printk(KERN_INFO "WARNING: This combination of AMD "
 419                        "processors is not suitable for SMP.\n");
 420                add_taint(TAINT_UNSAFE_SMP);
 421        }
 422}
 423
 424/*
 425 * The bootstrap kernel entry code has set these up. Save them for
 426 * a given CPU
 427 */
 428
 429void __cpuinit smp_store_cpu_info(int id)
 430{
 431        struct cpuinfo_x86 *c = &cpu_data(id);
 432
 433        *c = boot_cpu_data;
 434        c->cpu_index = id;
 435        if (id != 0)
 436                identify_secondary_cpu(c);
 437        smp_apply_quirks(c);
 438}
 439
 440
 441void __cpuinit set_cpu_sibling_map(int cpu)
 442{
 443        int i;
 444        struct cpuinfo_x86 *c = &cpu_data(cpu);
 445
 446        cpu_set(cpu, cpu_sibling_setup_map);
 447
 448        if (smp_num_siblings > 1) {
 449                for_each_cpu_mask_nr(i, cpu_sibling_setup_map) {
 450                        if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
 451                            c->cpu_core_id == cpu_data(i).cpu_core_id) {
 452                                cpu_set(i, per_cpu(cpu_sibling_map, cpu));
 453                                cpu_set(cpu, per_cpu(cpu_sibling_map, i));
 454                                cpu_set(i, per_cpu(cpu_core_map, cpu));
 455                                cpu_set(cpu, per_cpu(cpu_core_map, i));
 456                                cpu_set(i, c->llc_shared_map);
 457                                cpu_set(cpu, cpu_data(i).llc_shared_map);
 458                        }
 459                }
 460        } else {
 461                cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
 462        }
 463
 464        cpu_set(cpu, c->llc_shared_map);
 465
 466        if (current_cpu_data.x86_max_cores == 1) {
 467                per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
 468                c->booted_cores = 1;
 469                return;
 470        }
 471
 472        for_each_cpu_mask_nr(i, cpu_sibling_setup_map) {
 473                if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
 474                    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
 475                        cpu_set(i, c->llc_shared_map);
 476                        cpu_set(cpu, cpu_data(i).llc_shared_map);
 477                }
 478                if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
 479                        cpu_set(i, per_cpu(cpu_core_map, cpu));
 480                        cpu_set(cpu, per_cpu(cpu_core_map, i));
 481                        /*
  482                         *  Does this new cpu bring up a new core?
 483                         */
 484                        if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
 485                                /*
 486                                 * for each core in package, increment
 487                                 * the booted_cores for this new cpu
 488                                 */
 489                                if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
 490                                        c->booted_cores++;
 491                                /*
 492                                 * increment the core count for all
 493                                 * the other cpus in this package
 494                                 */
 495                                if (i != cpu)
 496                                        cpu_data(i).booted_cores++;
 497                        } else if (i != cpu && !c->booted_cores)
 498                                c->booted_cores = cpu_data(i).booted_cores;
 499                }
 500        }
 501}
 502
 503/* maps the cpu to the sched domain representing multi-core */
 504cpumask_t cpu_coregroup_map(int cpu)
 505{
 506        struct cpuinfo_x86 *c = &cpu_data(cpu);
 507        /*
  508         * For performance, we return the last level cache shared map.
  509         * For power savings, we return cpu_core_map.
 510         */
 511        if (sched_mc_power_savings || sched_smt_power_savings)
 512                return per_cpu(cpu_core_map, cpu);
 513        else
 514                return c->llc_shared_map;
 515}
 516
 517static void impress_friends(void)
 518{
 519        int cpu;
 520        unsigned long bogosum = 0;
 521        /*
 522         * Allow the user to impress friends.
 523         */
 524        pr_debug("Before bogomips.\n");
 525        for_each_possible_cpu(cpu)
 526                if (cpu_isset(cpu, cpu_callout_map))
 527                        bogosum += cpu_data(cpu).loops_per_jiffy;
 528        printk(KERN_INFO
 529                "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
 530                num_online_cpus(),
 531                bogosum/(500000/HZ),
 532                (bogosum/(5000/HZ))%100);
 533
 534        pr_debug("Before bogocount - setting activated=1.\n");
 535}
 536
 537static inline void __inquire_remote_apic(int apicid)
 538{
 539        unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
 540        char *names[] = { "ID", "VERSION", "SPIV" };
 541        int timeout;
 542        u32 status;
 543
 544        printk(KERN_INFO "Inquiring remote APIC 0x%x...\n", apicid);
 545
 546        for (i = 0; i < ARRAY_SIZE(regs); i++) {
 547                printk(KERN_INFO "... APIC 0x%x %s: ", apicid, names[i]);
 548
 549                /*
 550                 * Wait for idle.
 551                 */
 552                status = safe_apic_wait_icr_idle();
 553                if (status)
 554                        printk(KERN_CONT
 555                               "a previous APIC delivery may have failed\n");
 556
 557                apic_icr_write(APIC_DM_REMRD | regs[i], apicid);
 558
 559                timeout = 0;
 560                do {
 561                        udelay(100);
 562                        status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
 563                } while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);
 564
 565                switch (status) {
 566                case APIC_ICR_RR_VALID:
 567                        status = apic_read(APIC_RRR);
 568                        printk(KERN_CONT "%08x\n", status);
 569                        break;
 570                default:
 571                        printk(KERN_CONT "failed\n");
 572                }
 573        }
 574}
 575
 576#ifdef WAKE_SECONDARY_VIA_NMI
 577/*
 578 * Poke the other CPU in the eye via NMI to wake it up. Remember that the normal
 579 * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this
 580 * won't ... remember to clear down the APIC, etc later.
 581 */
 582static int __devinit
 583wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
 584{
 585        unsigned long send_status, accept_status = 0;
 586        int maxlvt;
 587
 588        /* Target chip */
 589        /* Boot on the stack */
 590        /* Kick the second */
 591        apic_icr_write(APIC_DM_NMI | APIC_DEST_LOGICAL, logical_apicid);
 592
 593        pr_debug("Waiting for send to finish...\n");
 594        send_status = safe_apic_wait_icr_idle();
 595
 596        /*
 597         * Give the other CPU some time to accept the IPI.
 598         */
 599        udelay(200);
 600        if (APIC_INTEGRATED(apic_version[phys_apicid])) {
 601                maxlvt = lapic_get_maxlvt();
 602                if (maxlvt > 3)                        /* Due to the Pentium erratum 3AP.  */
 603                        apic_write(APIC_ESR, 0);
 604                accept_status = (apic_read(APIC_ESR) & 0xEF);
 605        }
 606        pr_debug("NMI sent.\n");
 607
 608        if (send_status)
 609                printk(KERN_ERR "APIC never delivered???\n");
 610        if (accept_status)
 611                printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status);
 612
 613        return (send_status | accept_status);
 614}
 615#endif        /* WAKE_SECONDARY_VIA_NMI */
 616
 617#ifdef WAKE_SECONDARY_VIA_INIT
 618static int __devinit
 619wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
 620{
 621        unsigned long send_status, accept_status = 0;
 622        int maxlvt, num_starts, j;
 623
 624        if (get_uv_system_type() == UV_NON_UNIQUE_APIC) {
 625                send_status = uv_wakeup_secondary(phys_apicid, start_eip);
 626                atomic_set(&init_deasserted, 1);
 627                return send_status;
 628        }
 629
 630        maxlvt = lapic_get_maxlvt();
 631
 632        /*
 633         * Be paranoid about clearing APIC errors.
 634         */
 635        if (APIC_INTEGRATED(apic_version[phys_apicid])) {
 636                if (maxlvt > 3)                /* Due to the Pentium erratum 3AP.  */
 637                        apic_write(APIC_ESR, 0);
 638                apic_read(APIC_ESR);
 639        }
 640
 641        pr_debug("Asserting INIT.\n");
 642
 643        /*
 644         * Turn INIT on target chip
 645         */
 646        /*
 647         * Send IPI
 648         */
 649        apic_icr_write(APIC_INT_LEVELTRIG | APIC_INT_ASSERT | APIC_DM_INIT,
 650                       phys_apicid);
 651
 652        pr_debug("Waiting for send to finish...\n");
 653        send_status = safe_apic_wait_icr_idle();
 654
 655        mdelay(10);
 656
 657        pr_debug("Deasserting INIT.\n");
 658
 659        /* Target chip */
 660        /* Send IPI */
 661        apic_icr_write(APIC_INT_LEVELTRIG | APIC_DM_INIT, phys_apicid);
 662
 663        pr_debug("Waiting for send to finish...\n");
 664        send_status = safe_apic_wait_icr_idle();
 665
 666        mb();
 667        atomic_set(&init_deasserted, 1);
 668
 669        /*
 670         * Should we send STARTUP IPIs ?
 671         *
 672         * Determine this based on the APIC version.
 673         * If we don't have an integrated APIC, don't send the STARTUP IPIs.
 674         */
 675        if (APIC_INTEGRATED(apic_version[phys_apicid]))
 676                num_starts = 2;
 677        else
 678                num_starts = 0;
 679
 680        /*
 681         * Paravirt / VMI wants a startup IPI hook here to set up the
 682         * target processor state.
 683         */
 684        startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
 685                         (unsigned long)stack_start.sp);
 686
 687        /*
 688         * Run STARTUP IPI loop.
 689         */
 690        pr_debug("#startup loops: %d.\n", num_starts);
 691
 692        for (j = 1; j <= num_starts; j++) {
 693                pr_debug("Sending STARTUP #%d.\n", j);
 694                if (maxlvt > 3)                /* Due to the Pentium erratum 3AP.  */
 695                        apic_write(APIC_ESR, 0);
 696                apic_read(APIC_ESR);
 697                pr_debug("After apic_write.\n");
 698
 699                /*
 700                 * STARTUP IPI
 701                 */
 702
 703                /* Target chip */
 704                /* Boot on the stack */
 705                /* Kick the second */
 706                apic_icr_write(APIC_DM_STARTUP | (start_eip >> 12),
 707                               phys_apicid);
 708
 709                /*
 710                 * Give the other CPU some time to accept the IPI.
 711                 */
 712                udelay(300);
 713
 714                pr_debug("Startup point 1.\n");
 715
 716                pr_debug("Waiting for send to finish...\n");
 717                send_status = safe_apic_wait_icr_idle();
 718
 719                /*
 720                 * Give the other CPU some time to accept the IPI.
 721                 */
 722                udelay(200);
 723                if (maxlvt > 3)                /* Due to the Pentium erratum 3AP.  */
 724                        apic_write(APIC_ESR, 0);
 725                accept_status = (apic_read(APIC_ESR) & 0xEF);
 726                if (send_status || accept_status)
 727                        break;
 728        }
 729        pr_debug("After Startup.\n");
 730
 731        if (send_status)
 732                printk(KERN_ERR "APIC never delivered???\n");
 733        if (accept_status)
 734                printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status);
 735
 736        return (send_status | accept_status);
 737}
 738#endif        /* WAKE_SECONDARY_VIA_INIT */
 739
 740struct create_idle {
 741        struct work_struct work;
 742        struct task_struct *idle;
 743        struct completion done;
 744        int cpu;
 745};
 746
 747static void __cpuinit do_fork_idle(struct work_struct *work)
 748{
 749        struct create_idle *c_idle =
 750                container_of(work, struct create_idle, work);
 751
 752        c_idle->idle = fork_idle(c_idle->cpu);
 753        complete(&c_idle->done);
 754}
 755
 756#ifdef CONFIG_X86_64
 757
 758/* __ref because it's safe to call free_bootmem when after_bootmem == 0. */
 759static void __ref free_bootmem_pda(struct x8664_pda *oldpda)
 760{
 761        if (!after_bootmem)
 762                free_bootmem((unsigned long)oldpda, sizeof(*oldpda));
 763}
 764
 765/*
 766 * Allocate node local memory for the AP pda.
 767 *
 768 * Must be called after the _cpu_pda pointer table is initialized.
 769 */
 770int __cpuinit get_local_pda(int cpu)
 771{
 772        struct x8664_pda *oldpda, *newpda;
 773        unsigned long size = sizeof(struct x8664_pda);
 774        int node = cpu_to_node(cpu);
 775
 776        if (cpu_pda(cpu) && !cpu_pda(cpu)->in_bootmem)
 777                return 0;
 778
 779        oldpda = cpu_pda(cpu);
 780        newpda = kmalloc_node(size, GFP_ATOMIC, node);
 781        if (!newpda) {
 782                printk(KERN_ERR "Could not allocate node local PDA "
 783                        "for CPU %d on node %d\n", cpu, node);
 784
 785                if (oldpda)
 786                        return 0;        /* have a usable pda */
 787                else
 788                        return -1;
 789        }
 790
 791        if (oldpda) {
 792                memcpy(newpda, oldpda, size);
 793                free_bootmem_pda(oldpda);
 794        }
 795
 796        newpda->in_bootmem = 0;
 797        cpu_pda(cpu) = newpda;
 798        return 0;
 799}
 800#endif /* CONFIG_X86_64 */
 801
 802static int __cpuinit do_boot_cpu(int apicid, int cpu)
 803/*
 804 * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
 805 * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
 806 * Returns zero if CPU booted OK, else error code from wakeup_secondary_cpu.
 807 */
 808{
 809        unsigned long boot_error = 0;
 810        int timeout;
 811        unsigned long start_ip;
 812        unsigned short nmi_high = 0, nmi_low = 0;
 813        struct create_idle c_idle = {
 814                .cpu = cpu,
 815                .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
 816        };
 817        INIT_WORK(&c_idle.work, do_fork_idle);
 818
 819#ifdef CONFIG_X86_64
 820        /* Allocate node local memory for AP pdas */
 821        if (cpu > 0) {
 822                boot_error = get_local_pda(cpu);
 823                if (boot_error)
 824                        goto restore_state;
 825                        /* if can't get pda memory, can't start cpu */
 826        }
 827#endif
 828
 829        alternatives_smp_switch(1);
 830
 831        c_idle.idle = get_idle_for_cpu(cpu);
 832
 833        /*
  834         * We can't use kernel_thread since we must avoid
  835         * rescheduling the child.
 836         */
 837        if (c_idle.idle) {
 838                c_idle.idle->thread.sp = (unsigned long) (((struct pt_regs *)
 839                        (THREAD_SIZE +  task_stack_page(c_idle.idle))) - 1);
 840                init_idle(c_idle.idle, cpu);
 841                goto do_rest;
 842        }
 843
 844        if (!keventd_up() || current_is_keventd())
 845                c_idle.work.func(&c_idle.work);
 846        else {
 847                schedule_work(&c_idle.work);
 848                wait_for_completion(&c_idle.done);
 849        }
 850
 851        if (IS_ERR(c_idle.idle)) {
  852                printk(KERN_ERR "failed fork for CPU %d\n", cpu);
 853                return PTR_ERR(c_idle.idle);
 854        }
 855
 856        set_idle_for_cpu(cpu, c_idle.idle);
 857do_rest:
 858#ifdef CONFIG_X86_32
 859        per_cpu(current_task, cpu) = c_idle.idle;
 860        init_gdt(cpu);
 861        /* Stack for startup_32 can be just as for start_secondary onwards */
 862        irq_ctx_init(cpu);
 863#else
 864        cpu_pda(cpu)->pcurrent = c_idle.idle;
 865        clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
 866#endif
 867        early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
 868        initial_code = (unsigned long)start_secondary;
 869        stack_start.sp = (void *) c_idle.idle->thread.sp;
 870
 871        /* start_ip had better be page-aligned! */
 872        start_ip = setup_trampoline();
 873
 874        /* So we see what's up   */
 875        printk(KERN_INFO "Booting processor %d APIC 0x%x ip 0x%lx\n",
 876                          cpu, apicid, start_ip);
 877
 878        /*
 879         * This grunge runs the startup process for
 880         * the targeted processor.
 881         */
 882
 883        atomic_set(&init_deasserted, 0);
 884
 885        if (get_uv_system_type() != UV_NON_UNIQUE_APIC) {
 886
 887                pr_debug("Setting warm reset code and vector.\n");
 888
 889                store_NMI_vector(&nmi_high, &nmi_low);
 890
 891                smpboot_setup_warm_reset_vector(start_ip);
 892                /*
 893                 * Be paranoid about clearing APIC errors.
  894                 */
 895                if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
 896                        apic_write(APIC_ESR, 0);
 897                        apic_read(APIC_ESR);
 898                }
 899        }
 900
 901        /*
 902         * Starting actual IPI sequence...
 903         */
 904        boot_error = wakeup_secondary_cpu(apicid, start_ip);
 905
 906        if (!boot_error) {
 907                /*
 908                 * allow APs to start initializing.
 909                 */
 910                pr_debug("Before Callout %d.\n", cpu);
 911                cpu_set(cpu, cpu_callout_map);
 912                pr_debug("After Callout %d.\n", cpu);
 913
 914                /*
 915                 * Wait 5s total for a response
 916                 */
 917                for (timeout = 0; timeout < 50000; timeout++) {
 918                        if (cpu_isset(cpu, cpu_callin_map))
 919                                break;        /* It has booted */
 920                        udelay(100);
 921                }
 922
 923                if (cpu_isset(cpu, cpu_callin_map)) {
 924                        /* number CPUs logically, starting from 1 (BSP is 0) */
 925                        pr_debug("OK.\n");
 926                        printk(KERN_INFO "CPU%d: ", cpu);
 927                        print_cpu_info(&cpu_data(cpu));
 928                        pr_debug("CPU has booted.\n");
 929                } else {
 930                        boot_error = 1;
 931                        if (*((volatile unsigned char *)trampoline_base)
 932                                        == 0xA5)
 933                                /* trampoline started but...? */
 934                                printk(KERN_ERR "Stuck ??\n");
 935                        else
 936                                /* trampoline code not run */
 937                                printk(KERN_ERR "Not responding.\n");
 938                        if (get_uv_system_type() != UV_NON_UNIQUE_APIC)
 939                                inquire_remote_apic(apicid);
 940                }
 941        }
 942#ifdef CONFIG_X86_64
 943restore_state:
 944#endif
 945        if (boot_error) {
 946                /* Try to put things back the way they were before ... */
 947                numa_remove_cpu(cpu); /* was set by numa_add_cpu */
 948                cpu_clear(cpu, cpu_callout_map); /* was set by do_boot_cpu() */
 949                cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */
 950                cpu_clear(cpu, cpu_present_map);
 951                per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID;
 952        }
 953
 954        /* mark "stuck" area as not stuck */
 955        *((volatile unsigned long *)trampoline_base) = 0;
 956
 957        /*
 958         * Cleanup possible dangling ends...
 959         */
 960        smpboot_restore_warm_reset_vector();
 961
 962        return boot_error;
 963}
 964
 965int __cpuinit native_cpu_up(unsigned int cpu)
 966{
 967        int apicid = cpu_present_to_apicid(cpu);
 968        unsigned long flags;
 969        int err;
 970
 971        WARN_ON(irqs_disabled());
 972
 973        pr_debug("++++++++++++++++++++=_---CPU UP  %u\n", cpu);
 974
 975        if (apicid == BAD_APICID || apicid == boot_cpu_physical_apicid ||
 976            !physid_isset(apicid, phys_cpu_present_map)) {
 977                printk(KERN_ERR "%s: bad cpu %d\n", __func__, cpu);
 978                return -EINVAL;
 979        }
 980
 981        /*
 982         * Already booted CPU?
 983         */
 984        if (cpu_isset(cpu, cpu_callin_map)) {
 985                pr_debug("do_boot_cpu %d Already started\n", cpu);
 986                return -ENOSYS;
 987        }
 988
 989        /*
 990         * Save current MTRR state in case it was changed since early boot
 991         * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync:
 992         */
 993        mtrr_save_state();
 994
 995        per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
 996
 997#ifdef CONFIG_X86_32
 998        /* init low mem mapping */
 999        clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
1000                min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
1001        flush_tlb_all();
1002        low_mappings = 1;
1003
1004        err = do_boot_cpu(apicid, cpu);
1005
1006        zap_low_mappings();
1007        low_mappings = 0;
1008#else
1009        err = do_boot_cpu(apicid, cpu);
1010#endif
1011        if (err) {
1012                pr_debug("do_boot_cpu failed %d\n", err);
1013                return -EIO;
1014        }
1015
1016        /*
1017         * Check TSC synchronization with the AP (keep irqs disabled
1018         * while doing so):
1019         */
1020        local_irq_save(flags);
1021        check_tsc_sync_source(cpu);
1022        local_irq_restore(flags);
1023
1024        while (!cpu_online(cpu)) {
1025                cpu_relax();
1026                touch_nmi_watchdog();
1027        }
1028
1029        return 0;
1030}
1031
1032/*
1033 * Fall back to non SMP mode after errors.
1034 *
1035 * RED-PEN audit/test this more. I bet there is more state messed up here.
1036 */
1037static __init void disable_smp(void)
1038{
1039        cpu_present_map = cpumask_of_cpu(0);
1040        cpu_possible_map = cpumask_of_cpu(0);
1041        smpboot_clear_io_apic_irqs();
1042
1043        if (smp_found_config)
1044                physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
1045        else
1046                physid_set_mask_of_physid(0, &phys_cpu_present_map);
1047        map_cpu_to_logical_apicid();
1048        cpu_set(0, per_cpu(cpu_sibling_map, 0));
1049        cpu_set(0, per_cpu(cpu_core_map, 0));
1050}
1051
1052/*
1053 * Various sanity checks.
1054 */
1055static int __init smp_sanity_check(unsigned max_cpus)
1056{
1057        preempt_disable();
1058
1059#if defined(CONFIG_X86_PC) && defined(CONFIG_X86_32)
1060        if (def_to_bigsmp && nr_cpu_ids > 8) {
1061                unsigned int cpu;
1062                unsigned nr;
1063
1064                printk(KERN_WARNING
1065                       "More than 8 CPUs detected - skipping them.\n"
1066                       "Use CONFIG_X86_GENERICARCH and CONFIG_X86_BIGSMP.\n");
1067
1068                nr = 0;
1069                for_each_present_cpu(cpu) {
1070                        if (nr >= 8)
1071                                cpu_clear(cpu, cpu_present_map);
1072                        nr++;
1073                }
1074
1075                nr = 0;
1076                for_each_possible_cpu(cpu) {
1077                        if (nr >= 8)
1078                                cpu_clear(cpu, cpu_possible_map);
1079                        nr++;
1080                }
1081
1082                nr_cpu_ids = 8;
1083        }
1084#endif
1085
1086        if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
 1087                printk(KERN_WARNING "weird, boot CPU (#%d) not listed "
1088                                    "by the BIOS.\n", hard_smp_processor_id());
1089                physid_set(hard_smp_processor_id(), phys_cpu_present_map);
1090        }
1091
1092        /*
1093         * If we couldn't find an SMP configuration at boot time,
1094         * get out of here now!
1095         */
1096        if (!smp_found_config && !acpi_lapic) {
1097                preempt_enable();
1098                printk(KERN_NOTICE "SMP motherboard not detected.\n");
1099                disable_smp();
1100                if (APIC_init_uniprocessor())
1101                        printk(KERN_NOTICE "Local APIC not detected."
1102                                           " Using dummy APIC emulation.\n");
1103                return -1;
1104        }
1105
1106        /*
1107         * Should not be necessary because the MP table should list the boot
1108         * CPU too, but we do it for the sake of robustness anyway.
1109         */
1110        if (!check_phys_apicid_present(boot_cpu_physical_apicid)) {
1111                printk(KERN_NOTICE
1112                        "weird, boot CPU (#%d) not listed by the BIOS.\n",
1113                        boot_cpu_physical_apicid);
1114                physid_set(hard_smp_processor_id(), phys_cpu_present_map);
1115        }
1116        preempt_enable();
1117
1118        /*
1119         * If we couldn't find a local APIC, then get out of here now!
1120         */
1121        if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) &&
1122            !cpu_has_apic) {
1123                printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
1124                        boot_cpu_physical_apicid);
 1125                printk(KERN_ERR "... forcing use of dummy APIC emulation. "
1126                                "(tell your hw vendor)\n");
1127                smpboot_clear_io_apic();
1128                return -1;
1129        }
1130
1131        verify_local_APIC();
1132
1133        /*
1134         * If SMP should be disabled, then really disable it!
1135         */
1136        if (!max_cpus) {
1137                printk(KERN_INFO "SMP mode deactivated.\n");
1138                smpboot_clear_io_apic();
1139
1140                localise_nmi_watchdog();
1141
1142                connect_bsp_APIC();
1143                setup_local_APIC();
1144                end_local_APIC_setup();
1145                return -1;
1146        }
1147
1148        return 0;
1149}
1150
1151static void __init smp_cpu_index_default(void)
1152{
1153        int i;
1154        struct cpuinfo_x86 *c;
1155
1156        for_each_possible_cpu(i) {
1157                c = &cpu_data(i);
1158                /* mark all to hotplug */
1159                c->cpu_index = NR_CPUS;
1160        }
1161}
1162
1163/*
1164 * Prepare for SMP bootup.  The MP table or ACPI has been read
1165 * earlier.  Just do some sanity checking here and enable APIC mode.
1166 */
1167void __init native_smp_prepare_cpus(unsigned int max_cpus)
1168{
1169        preempt_disable();
1170        smp_cpu_index_default();
1171        current_cpu_data = boot_cpu_data;
1172        cpu_callin_map = cpumask_of_cpu(0);
1173        mb();
1174        /*
1175         * Setup boot CPU information
1176         */
1177        smp_store_cpu_info(0); /* Final full version of the data */
1178#ifdef CONFIG_X86_32
1179        boot_cpu_logical_apicid = logical_smp_processor_id();
1180#endif
1181        current_thread_info()->cpu = 0;  /* needed? */
1182        set_cpu_sibling_map(0);
1183
1184#ifdef CONFIG_X86_64
1185        enable_IR_x2apic();
1186        setup_apic_routing();
1187#endif
1188
1189        if (smp_sanity_check(max_cpus) < 0) {
1190                printk(KERN_INFO "SMP disabled\n");
1191                disable_smp();
1192                goto out;
1193        }
1194
1195        preempt_disable();
1196        if (read_apic_id() != boot_cpu_physical_apicid) {
1197                panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
1198                     read_apic_id(), boot_cpu_physical_apicid);
1199                /* Or can we switch back to PIC here? */
1200        }
1201        preempt_enable();
1202
1203        connect_bsp_APIC();
1204
1205        /*
1206         * Switch from PIC to APIC mode.
1207         */
1208        setup_local_APIC();
1209
1210#ifdef CONFIG_X86_64
1211        /*
1212         * Enable IO APIC before setting up error vector
1213         */
1214        if (!skip_ioapic_setup && nr_ioapics)
1215                enable_IO_APIC();
1216#endif
1217        end_local_APIC_setup();
1218
1219        map_cpu_to_logical_apicid();
1220
1221        setup_portio_remap();
1222
1223        smpboot_setup_io_apic();
1224        /*
1225         * Set up local APIC timer on boot CPU.
1226         */
1227
1228        printk(KERN_INFO "CPU%d: ", 0);
1229        print_cpu_info(&cpu_data(0));
1230        setup_boot_clock();
1231
1232        if (is_uv_system())
1233                uv_system_init();
1234out:
1235        preempt_enable();
1236}
1237/*
1238 * Early setup to make printk work.
1239 */
1240void __init native_smp_prepare_boot_cpu(void)
1241{
1242        int me = smp_processor_id();
1243#ifdef CONFIG_X86_32
1244        init_gdt(me);
1245#endif
1246        switch_to_new_gdt();
1247        /* already set me in cpu_online_map in boot_cpu_init() */
1248        cpu_set(me, cpu_callout_map);
1249        per_cpu(cpu_state, me) = CPU_ONLINE;
1250}
1251
1252void __init native_smp_cpus_done(unsigned int max_cpus)
1253{
1254        pr_debug("Boot done.\n");
1255
1256        impress_friends();
1257        smp_checks();
1258#ifdef CONFIG_X86_IO_APIC
1259        setup_ioapic_dest();
1260#endif
1261        check_nmi_watchdog();
1262}
1263
1264/*
 1265 * cpu_possible_map should be static; it cannot change as cpus
 1266 * are onlined or offlined. The reason is that per-cpu data structures
 1267 * are allocated by some modules at init time, and they don't expect
 1268 * this to happen dynamically on cpu arrival/departure.
 1269 * cpu_present_map, on the other hand, can change dynamically.
 1270 * When cpu_hotplug is not compiled in, we resort to the current
 1271 * behaviour, which is cpu_possible == cpu_present.
1272 * - Ashok Raj
1273 *
1274 * Three ways to find out the number of additional hotplug CPUs:
 1275 * - If the BIOS specified disabled CPUs in ACPI/mptables, use that.
1276 * - The user can overwrite it with additional_cpus=NUM
1277 * - Otherwise don't reserve additional CPUs.
1278 * We do this because additional CPUs waste a lot of memory.
1279 * -AK
1280 */
1281__init void prefill_possible_map(void)
1282{
1283        int i, possible;
1284
1285        /* no processor from mptable or madt */
1286        if (!num_processors)
1287                num_processors = 1;
1288
1289        possible = num_processors + disabled_cpus;
1290        if (possible > NR_CPUS)
1291                possible = NR_CPUS;
1292
1293        printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
1294                possible, max_t(int, possible - num_processors, 0));
1295
1296        for (i = 0; i < possible; i++)
1297                cpu_set(i, cpu_possible_map);
1298
1299        nr_cpu_ids = possible;
1300}
1301
1302#ifdef CONFIG_HOTPLUG_CPU
1303
1304static void remove_siblinginfo(int cpu)
1305{
1306        int sibling;
1307        struct cpuinfo_x86 *c = &cpu_data(cpu);
1308
1309        for_each_cpu_mask_nr(sibling, per_cpu(cpu_core_map, cpu)) {
1310                cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
 1311                /*
1312                 * last thread sibling in this cpu core going down
1313                 */
1314                if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
1315                        cpu_data(sibling).booted_cores--;
1316        }
1317
1318        for_each_cpu_mask_nr(sibling, per_cpu(cpu_sibling_map, cpu))
1319                cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
1320        cpus_clear(per_cpu(cpu_sibling_map, cpu));
1321        cpus_clear(per_cpu(cpu_core_map, cpu));
1322        c->phys_proc_id = 0;
1323        c->cpu_core_id = 0;
1324        cpu_clear(cpu, cpu_sibling_setup_map);
1325}
1326
1327static void __ref remove_cpu_from_maps(int cpu)
1328{
1329        cpu_clear(cpu, cpu_online_map);
1330        cpu_clear(cpu, cpu_callout_map);
1331        cpu_clear(cpu, cpu_callin_map);
1332        /* was set by cpu_init() */
1333        cpu_clear(cpu, cpu_initialized);
1334        numa_remove_cpu(cpu);
1335}
1336
1337void cpu_disable_common(void)
1338{
1339        int cpu = smp_processor_id();
1340        /*
1341         * HACK:
1342         * Allow any queued timer interrupts to get serviced
1343         * This is only a temporary solution until we cleanup
1344         * fixup_irqs as we do for IA64.
1345         */
1346        local_irq_enable();
1347        mdelay(1);
1348
1349        local_irq_disable();
1350        remove_siblinginfo(cpu);
1351
1352        /* It's now safe to remove this processor from the online map */
1353        lock_vector_lock();
1354        remove_cpu_from_maps(cpu);
1355        unlock_vector_lock();
1356        fixup_irqs(cpu_online_map);
1357}
1358
1359int native_cpu_disable(void)
1360{
1361        int cpu = smp_processor_id();
1362
1363        /*
1364         * Perhaps use cpufreq to drop frequency, but that could go
1365         * into generic code.
1366         *
1367         * We won't take down the boot processor on i386 due to some
1368         * interrupts only being able to be serviced by the BSP.
1369         * Especially so if we're not using an IOAPIC        -zwane
1370         */
1371        if (cpu == 0)
1372                return -EBUSY;
1373
1374        if (nmi_watchdog == NMI_LOCAL_APIC)
1375                stop_apic_nmi_watchdog(NULL);
1376        clear_local_APIC();
1377
1378        cpu_disable_common();
1379        return 0;
1380}
1381
1382void native_cpu_die(unsigned int cpu)
1383{
1384        /* We don't do anything here: idle task is faking death itself. */
1385        unsigned int i;
1386
1387        for (i = 0; i < 10; i++) {
1388                /* They ack this in play_dead by setting CPU_DEAD */
1389                if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
1390                        printk(KERN_INFO "CPU %d is now offline\n", cpu);
1391                        if (1 == num_online_cpus())
1392                                alternatives_smp_switch(0);
1393                        return;
1394                }
1395                msleep(100);
1396        }
1397        printk(KERN_ERR "CPU %u didn't die...\n", cpu);
1398}
1399
1400void play_dead_common(void)
1401{
1402        idle_task_exit();
1403        reset_lazy_tlbstate();
1404        irq_ctx_exit(raw_smp_processor_id());
1405        c1e_remove_cpu(raw_smp_processor_id());
1406
1407        mb();
1408        /* Ack it */
1409        __get_cpu_var(cpu_state) = CPU_DEAD;
1410
1411        /*
1412         * With physical CPU hotplug, we should halt the cpu
1413         */
1414        local_irq_disable();
1415}
1416
1417void native_play_dead(void)
1418{
1419        play_dead_common();
1420        wbinvd_halt();
1421}
1422
1423#else /* ... !CONFIG_HOTPLUG_CPU */
1424int native_cpu_disable(void)
1425{
1426        return -ENOSYS;
1427}
1428
1429void native_cpu_die(unsigned int cpu)
1430{
1431        /* We said "no" in __cpu_disable */
1432        BUG();
1433}
1434
1435void native_play_dead(void)
1436{
1437        BUG();
1438}
1439
1440#endif
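
Note on the flagged location: file line 1415 is the closing brace of play_dead_common(), which ends with local_irq_disable() and never re-enables interrupts. Stanse presumably models the disabled-IRQ state as a held lock, so the function appears to exit in a locked state. Because the CPU is being taken offline (native_play_dead() halts it immediately afterwards), leaving interrupts disabled here is intentional, and this report is most likely a false positive.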