#include <linux/init.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/percpu.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/nmi.h>

#include <asm/acpi.h>
#include <asm/desc.h>
#include <asm/nmi.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/smp.h>
#include <asm/trampoline.h>
#include <asm/cpu.h>
#include <asm/numa.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mtrr.h>
#include <asm/vmi.h>
#include <asm/genapic.h>
#include <linux/mc146818rtc.h>

#include <mach_apic.h>
#include <mach_wakecpu.h>
#include <smpboot_hooks.h>

#ifdef CONFIG_X86_32
u8 apicid_2_node[MAX_APICID];
static int low_mappings;
#endif

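/* State of each CPU */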
DEFINE_PER_CPU(int, cpu_state) = { 0 };

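/*
 * Cache the idle task of each CPU so a processor that is offlined and
 * later brought back can reuse the idle thread it was first given.
 */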
#ifdef CONFIG_HOTPLUG_CPU
static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
#define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x))
#define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p))
#else
static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata;
#define get_idle_for_cpu(x) (idle_thread_array[(x)])
#define set_idle_for_cpu(x, p) (idle_thread_array[(x)] = (p))
#endif

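/* Number of siblings per CPU package */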
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

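/* Last level cache ID of each logical CPU */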
DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID;

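/* Bitmask of currently online CPUs */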
cpumask_t cpu_online_map __read_mostly;
EXPORT_SYMBOL(cpu_online_map);

cpumask_t cpu_callin_map;
cpumask_t cpu_callout_map;
cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);

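/* Per-CPU bitmask of the HT siblings of each logical CPU */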
DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);

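/* Per-CPU bitmask of the core siblings of each logical CPU */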
DEFINE_PER_CPU(cpumask_t, cpu_core_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

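/* Per-CPU bogomips and other CPU parameters */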
DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL(cpu_info);

static atomic_t init_deasserted;

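/* CPUs for which sibling maps have been computed */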
static cpumask_t cpu_sibling_setup_map;

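/* Set if we find a B stepping CPU */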
static int __cpuinitdata smp_b_stepping;

#if defined(CONFIG_NUMA) && defined(CONFIG_X86_32)
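/* Which logical CPUs are on which nodes */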
cpumask_t node_to_cpumask_map[MAX_NUMNODES] __read_mostly =
                                { [0 ... MAX_NUMNODES-1] = CPU_MASK_NONE };
EXPORT_SYMBOL(node_to_cpumask_map);

int cpu_to_node_map[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 };
EXPORT_SYMBOL(cpu_to_node_map);

static void map_cpu_to_node(int cpu, int node)
{
        printk(KERN_INFO "Mapping cpu %d to node %d\n", cpu, node);
        cpu_set(cpu, node_to_cpumask_map[node]);
        cpu_to_node_map[cpu] = node;
}

static void unmap_cpu_to_node(int cpu)
{
        int node;

        printk(KERN_INFO "Unmapping cpu %d from all nodes\n", cpu);
        for (node = 0; node < MAX_NUMNODES; node++)
                cpu_clear(cpu, node_to_cpumask_map[node]);
        cpu_to_node_map[cpu] = 0;
}
#else
#define map_cpu_to_node(cpu, node)      ({})
#define unmap_cpu_to_node(cpu)  ({})
#endif

#ifdef CONFIG_X86_32
static int boot_cpu_logical_apicid;

u8 cpu_2_logical_apicid[NR_CPUS] __read_mostly =
                                        { [0 ... NR_CPUS-1] = BAD_APICID };

static void map_cpu_to_logical_apicid(void)
{
        int cpu = smp_processor_id();
        int apicid = logical_smp_processor_id();
        int node = apicid_to_node(apicid);

        if (!node_online(node))
                node = first_online_node;

        cpu_2_logical_apicid[cpu] = apicid;
        map_cpu_to_node(cpu, node);
}

void numa_remove_cpu(int cpu)
{
        cpu_2_logical_apicid[cpu] = BAD_APICID;
        unmap_cpu_to_node(cpu);
}
#else
#define map_cpu_to_logical_apicid()     do {} while (0)
#endif

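/*
 * Report back to the boot processor.  Runs on the AP: waits for the
 * INIT deassert, checks in on cpu_callout_map, sets up the local APIC,
 * calibrates delay and finally marks itself in cpu_callin_map.
 */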
static void __cpuinit smp_callin(void)
{
        int cpuid, phys_id;
        unsigned long timeout;

        wait_for_init_deassert(&init_deasserted);

        phys_id = read_apic_id();
        cpuid = smp_processor_id();
        if (cpu_isset(cpuid, cpu_callin_map)) {
                panic("%s: phys CPU#%d, CPU#%d already present??\n", __func__,
                                        phys_id, cpuid);
        }
        pr_debug("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);

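        /*
         * The boot CPU sets our bit in cpu_callout_map once it has sent
         * the STARTUP sequence; give it up to two seconds before
         * declaring this boot attempt dead.
         */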
        timeout = jiffies + 2*HZ;
        while (time_before(jiffies, timeout)) {
                if (cpu_isset(cpuid, cpu_callout_map))
                        break;
                cpu_relax();
        }

        if (!time_before(jiffies, timeout)) {
                panic("%s: CPU%d started up but did not get a callout!\n",
                      __func__, cpuid);
        }

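        /*
         * The boot CPU has finished its part of the handshake; now set
         * up our local APIC.
         */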
        pr_debug("CALLIN, before setup_local_APIC().\n");
        smp_callin_clear_local_apic();
        setup_local_APIC();
        end_local_APIC_setup();
        map_cpu_to_logical_apicid();

        notify_cpu_starting(cpuid);

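        /*
         * calibrate_delay() relies on the timer interrupt, so interrupts
         * must be enabled while loops_per_jiffy is measured.
         */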
        local_irq_enable();
        calibrate_delay();
        local_irq_disable();
        pr_debug("Stack at about %p\n", &cpuid);

        smp_store_cpu_info(cpuid);

        cpu_set(cpuid, cpu_callin_map);
}

static int __cpuinitdata unsafe_smp;

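/*
 * Activate a secondary processor.  Entered from the trampoline via
 * initial_code = start_secondary (set up in do_boot_cpu()).
 */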
static void __cpuinit start_secondary(void *unused)
{
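        /*
         * Don't put anything before cpu_init(); SMP booting is fragile
         * until basic CPU state has been initialized.
         */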
        vmi_bringup();
        cpu_init();
        preempt_disable();
        smp_callin();

        barrier();

        check_tsc_sync_target();

        if (nmi_watchdog == NMI_IO_APIC) {
                disable_8259A_irq(0);
                enable_NMI_through_LVT0();
                enable_8259A_irq(0);
        }

#ifdef CONFIG_X86_32
        while (low_mappings)
                cpu_relax();
        __flush_tlb_all();
#endif

        set_cpu_sibling_map(raw_smp_processor_id());
        wmb();

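        /*
         * Hold the IPI call lock and vector_lock while marking this CPU
         * online, so an in-progress smp_call_function() or vector
         * assignment never sees a half-initialized online CPU.
         */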
        ipi_call_lock();
        lock_vector_lock();
        __setup_vector_irq(smp_processor_id());
        cpu_set(smp_processor_id(), cpu_online_map);
        unlock_vector_lock();
        ipi_call_unlock();
        per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;

        local_irq_enable();

        setup_secondary_clock();

        wmb();
        cpu_idle();
}

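/*
 * Flag B-stepping Pentiums, and check AMD K7 model/stepping pairs
 * against the set known to be MP capable; anything else is marked
 * unsafe_smp.
 */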
static void __cpuinit smp_apply_quirks(struct cpuinfo_x86 *c)
{
        if (c->x86_vendor == X86_VENDOR_INTEL &&
            c->x86 == 5 &&
            c->x86_mask >= 1 && c->x86_mask <= 4 &&
            c->x86_model <= 3)
                smp_b_stepping = 1;

        if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) {
                if (num_possible_cpus() == 1)
                        goto valid_k7;

                if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
                    (c->x86_mask == 1)))
                        goto valid_k7;

                if ((c->x86_model == 7) && (c->x86_mask == 0))
                        goto valid_k7;

                if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
                    ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
                    (c->x86_model > 7))
                        if (cpu_has_mp)
                                goto valid_k7;

                unsafe_smp = 1;
        }

valid_k7:
        ;
}

static void __cpuinit smp_checks(void)
{
        if (smp_b_stepping)
                printk(KERN_WARNING "WARNING: SMP operation may be unreliable "
                       "with B stepping processors.\n");

        if (unsafe_smp && num_online_cpus() > 1) {
                printk(KERN_INFO "WARNING: This combination of AMD "
                       "processors is not suitable for SMP.\n");
                add_taint(TAINT_UNSAFE_SMP);
        }
}

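/*
 * The boot CPU's data has already been set up; copy it and let
 * identify_secondary_cpu() refine the values for an AP.
 */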
void __cpuinit smp_store_cpu_info(int id)
{
        struct cpuinfo_x86 *c = &cpu_data(id);

        *c = boot_cpu_data;
        c->cpu_index = id;
        if (id != 0)
                identify_secondary_cpu(c);
        smp_apply_quirks(c);
}

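/* Build the HT-sibling, core and last-level-cache sharing maps for @cpu */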
void __cpuinit set_cpu_sibling_map(int cpu)
{
        int i;
        struct cpuinfo_x86 *c = &cpu_data(cpu);

        cpu_set(cpu, cpu_sibling_setup_map);

        if (smp_num_siblings > 1) {
                for_each_cpu_mask_nr(i, cpu_sibling_setup_map) {
                        if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
                            c->cpu_core_id == cpu_data(i).cpu_core_id) {
                                cpu_set(i, per_cpu(cpu_sibling_map, cpu));
                                cpu_set(cpu, per_cpu(cpu_sibling_map, i));
                                cpu_set(i, per_cpu(cpu_core_map, cpu));
                                cpu_set(cpu, per_cpu(cpu_core_map, i));
                                cpu_set(i, c->llc_shared_map);
                                cpu_set(cpu, cpu_data(i).llc_shared_map);
                        }
                }
        } else {
                cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
        }

        cpu_set(cpu, c->llc_shared_map);

        if (current_cpu_data.x86_max_cores == 1) {
                per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
                c->booted_cores = 1;
                return;
        }

        for_each_cpu_mask_nr(i, cpu_sibling_setup_map) {
                if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
                    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
                        cpu_set(i, c->llc_shared_map);
                        cpu_set(cpu, cpu_data(i).llc_shared_map);
                }
                if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
                        cpu_set(i, per_cpu(cpu_core_map, cpu));
                        cpu_set(cpu, per_cpu(cpu_core_map, i));
                        if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
                                if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
                                        c->booted_cores++;
                                if (i != cpu)
                                        cpu_data(i).booted_cores++;
                        } else if (i != cpu && !c->booted_cores)
                                c->booted_cores = cpu_data(i).booted_cores;
                }
        }
}

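/* Map @cpu to the sched-domain cpumask used for multi-core scheduling */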
cpumask_t cpu_coregroup_map(int cpu)
{
        struct cpuinfo_x86 *c = &cpu_data(cpu);

        if (sched_mc_power_savings || sched_smt_power_savings)
                return per_cpu(cpu_core_map, cpu);
        else
                return c->llc_shared_map;
}

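/* Sum loops_per_jiffy across the called-out CPUs and print total BogoMIPS */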
static void impress_friends(void)
{
        int cpu;
        unsigned long bogosum = 0;

        pr_debug("Before bogomips.\n");
        for_each_possible_cpu(cpu)
                if (cpu_isset(cpu, cpu_callout_map))
                        bogosum += cpu_data(cpu).loops_per_jiffy;
        printk(KERN_INFO
                "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
                num_online_cpus(),
                bogosum/(500000/HZ),
                (bogosum/(5000/HZ))%100);

        pr_debug("Before bogocount - setting activated=1.\n");
}

static inline void __inquire_remote_apic(int apicid)
{
        unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
        char *names[] = { "ID", "VERSION", "SPIV" };
        int timeout;
        u32 status;

        printk(KERN_INFO "Inquiring remote APIC 0x%x...\n", apicid);

        for (i = 0; i < ARRAY_SIZE(regs); i++) {
                printk(KERN_INFO "... APIC 0x%x %s: ", apicid, names[i]);

                status = safe_apic_wait_icr_idle();
                if (status)
                        printk(KERN_CONT
                               "a previous APIC delivery may have failed\n");

                apic_icr_write(APIC_DM_REMRD | regs[i], apicid);

                timeout = 0;
                do {
                        udelay(100);
                        status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
                } while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);

                switch (status) {
                case APIC_ICR_RR_VALID:
                        status = apic_read(APIC_RRR);
                        printk(KERN_CONT "%08x\n", status);
                        break;
                default:
                        printk(KERN_CONT "failed\n");
                }
        }
}

#ifdef WAKE_SECONDARY_VIA_NMI
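/*
 * Wake a secondary by poking it with an NMI, for platforms that use a
 * logical APIC ID rather than the INIT/STARTUP sequence.
 */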
static int __devinit
wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
{
        unsigned long send_status, accept_status = 0;
        int maxlvt;

        apic_icr_write(APIC_DM_NMI | APIC_DEST_LOGICAL, logical_apicid);

        pr_debug("Waiting for send to finish...\n");
        send_status = safe_apic_wait_icr_idle();

        udelay(200);
        if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
                maxlvt = lapic_get_maxlvt();
                if (maxlvt > 3)
                        apic_write(APIC_ESR, 0);
                accept_status = (apic_read(APIC_ESR) & 0xEF);
        }
        pr_debug("NMI sent.\n");

        if (send_status)
                printk(KERN_ERR "APIC never delivered???\n");
        if (accept_status)
                printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status);

        return (send_status | accept_status);
}
#endif

#ifdef WAKE_SECONDARY_VIA_INIT
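/*
 * Kick the secondary with the standard INIT/INIT/STARTUP sequence:
 * assert INIT, deassert it, then send up to two STARTUP IPIs pointing
 * at the trampoline in start_eip.
 */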
static int __devinit
wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
{
        unsigned long send_status, accept_status = 0;
        int maxlvt, num_starts, j;

        if (get_uv_system_type() == UV_NON_UNIQUE_APIC) {
                send_status = uv_wakeup_secondary(phys_apicid, start_eip);
                atomic_set(&init_deasserted, 1);
                return send_status;
        }

        maxlvt = lapic_get_maxlvt();

        if (APIC_INTEGRATED(apic_version[phys_apicid])) {
                if (maxlvt > 3)
                        apic_write(APIC_ESR, 0);
                apic_read(APIC_ESR);
        }

        pr_debug("Asserting INIT.\n");

        apic_icr_write(APIC_INT_LEVELTRIG | APIC_INT_ASSERT | APIC_DM_INIT,
                       phys_apicid);

        pr_debug("Waiting for send to finish...\n");
        send_status = safe_apic_wait_icr_idle();

        mdelay(10);

        pr_debug("Deasserting INIT.\n");

        apic_icr_write(APIC_INT_LEVELTRIG | APIC_DM_INIT, phys_apicid);

        pr_debug("Waiting for send to finish...\n");
        send_status = safe_apic_wait_icr_idle();

        mb();
        atomic_set(&init_deasserted, 1);

        if (APIC_INTEGRATED(apic_version[phys_apicid]))
                num_starts = 2;
        else
                num_starts = 0;

        startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
                         (unsigned long)stack_start.sp);

        pr_debug("#startup loops: %d.\n", num_starts);

        for (j = 1; j <= num_starts; j++) {
                pr_debug("Sending STARTUP #%d.\n", j);
                if (maxlvt > 3)
                        apic_write(APIC_ESR, 0);
                apic_read(APIC_ESR);
                pr_debug("After apic_write.\n");

                apic_icr_write(APIC_DM_STARTUP | (start_eip >> 12),
                               phys_apicid);

                udelay(300);

                pr_debug("Startup point 1.\n");

                pr_debug("Waiting for send to finish...\n");
                send_status = safe_apic_wait_icr_idle();

                udelay(200);
                if (maxlvt > 3)
                        apic_write(APIC_ESR, 0);
                accept_status = (apic_read(APIC_ESR) & 0xEF);
                if (send_status || accept_status)
                        break;
        }
        pr_debug("After Startup.\n");

        if (send_status)
                printk(KERN_ERR "APIC never delivered???\n");
        if (accept_status)
                printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status);

        return (send_status | accept_status);
}
#endif

struct create_idle {
        struct work_struct work;
        struct task_struct *idle;
        struct completion done;
        int cpu;
};

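/* Runs from keventd: fork the idle task for c_idle->cpu and signal completion */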
static void __cpuinit do_fork_idle(struct work_struct *work)
{
        struct create_idle *c_idle =
                container_of(work, struct create_idle, work);

        c_idle->idle = fork_idle(c_idle->cpu);
        complete(&c_idle->done);
}

#ifdef CONFIG_X86_64
static void __ref free_bootmem_pda(struct x8664_pda *oldpda)
{
        if (!after_bootmem)
                free_bootmem((unsigned long)oldpda, sizeof(*oldpda));
}

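/*
 * Allocate a node-local PDA for a CPU, replacing any bootmem-allocated
 * one.  Returns 0 on success (or if an existing PDA can keep being
 * used), -1 if no memory could be allocated and there is no PDA at all.
 */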
int __cpuinit get_local_pda(int cpu)
{
        struct x8664_pda *oldpda, *newpda;
        unsigned long size = sizeof(struct x8664_pda);
        int node = cpu_to_node(cpu);

        if (cpu_pda(cpu) && !cpu_pda(cpu)->in_bootmem)
                return 0;

        oldpda = cpu_pda(cpu);
        newpda = kmalloc_node(size, GFP_ATOMIC, node);
        if (!newpda) {
                printk(KERN_ERR "Could not allocate node local PDA "
                        "for CPU %d on node %d\n", cpu, node);

                if (oldpda)
                        return 0;
                else
                        return -1;
        }

        if (oldpda) {
                memcpy(newpda, oldpda, size);
                free_bootmem_pda(oldpda);
        }

        newpda->in_bootmem = 0;
        cpu_pda(cpu) = newpda;
        return 0;
}
#endif

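/*
 * Boot one AP.  On most platforms apicid is the physical APIC ID of
 * the target CPU.  Returns zero if the CPU booted, else an error.
 */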
static int __cpuinit do_boot_cpu(int apicid, int cpu)
{
        unsigned long boot_error = 0;
        int timeout;
        unsigned long start_ip;
        unsigned short nmi_high = 0, nmi_low = 0;
        struct create_idle c_idle = {
                .cpu = cpu,
                .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
        };
        INIT_WORK(&c_idle.work, do_fork_idle);

#ifdef CONFIG_X86_64
        if (cpu > 0) {
                boot_error = get_local_pda(cpu);
                if (boot_error)
                        goto restore_state;
        }
#endif

        alternatives_smp_switch(1);

        c_idle.idle = get_idle_for_cpu(cpu);

        if (c_idle.idle) {
                c_idle.idle->thread.sp = (unsigned long) (((struct pt_regs *)
                        (THREAD_SIZE + task_stack_page(c_idle.idle))) - 1);
                init_idle(c_idle.idle, cpu);
                goto do_rest;
        }

        if (!keventd_up() || current_is_keventd())
                c_idle.work.func(&c_idle.work);
        else {
                schedule_work(&c_idle.work);
                wait_for_completion(&c_idle.done);
        }

        if (IS_ERR(c_idle.idle)) {
                printk(KERN_ERR "failed fork for CPU %d\n", cpu);
                return PTR_ERR(c_idle.idle);
        }

        set_idle_for_cpu(cpu, c_idle.idle);
do_rest:
#ifdef CONFIG_X86_32
        per_cpu(current_task, cpu) = c_idle.idle;
        init_gdt(cpu);
        irq_ctx_init(cpu);
#else
        cpu_pda(cpu)->pcurrent = c_idle.idle;
        clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
#endif
        early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
        initial_code = (unsigned long)start_secondary;
        stack_start.sp = (void *) c_idle.idle->thread.sp;

        start_ip = setup_trampoline();

        printk(KERN_INFO "Booting processor %d APIC 0x%x ip 0x%lx\n",
               cpu, apicid, start_ip);

        atomic_set(&init_deasserted, 0);

        if (get_uv_system_type() != UV_NON_UNIQUE_APIC) {

                pr_debug("Setting warm reset code and vector.\n");

                store_NMI_vector(&nmi_high, &nmi_low);

                smpboot_setup_warm_reset_vector(start_ip);

                if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
                        apic_write(APIC_ESR, 0);
                        apic_read(APIC_ESR);
                }
        }

        boot_error = wakeup_secondary_cpu(apicid, start_ip);

        if (!boot_error) {
                pr_debug("Before Callout %d.\n", cpu);
                cpu_set(cpu, cpu_callout_map);
                pr_debug("After Callout %d.\n", cpu);

                for (timeout = 0; timeout < 50000; timeout++) {
                        if (cpu_isset(cpu, cpu_callin_map))
                                break;
                        udelay(100);
                }

                if (cpu_isset(cpu, cpu_callin_map)) {
                        pr_debug("OK.\n");
                        printk(KERN_INFO "CPU%d: ", cpu);
                        print_cpu_info(&cpu_data(cpu));
                        pr_debug("CPU has booted.\n");
                } else {
                        boot_error = 1;
                        if (*((volatile unsigned char *)trampoline_base)
                                        == 0xA5)
                                printk(KERN_ERR "Stuck ??\n");
                        else
                                printk(KERN_ERR "Not responding.\n");
                        if (get_uv_system_type() != UV_NON_UNIQUE_APIC)
                                inquire_remote_apic(apicid);
                }
        }
#ifdef CONFIG_X86_64
restore_state:
#endif
        if (boot_error) {
                numa_remove_cpu(cpu);
                cpu_clear(cpu, cpu_callout_map);
                cpu_clear(cpu, cpu_initialized);
                cpu_clear(cpu, cpu_present_map);
                per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID;
        }

        *((volatile unsigned long *)trampoline_base) = 0;

        smpboot_restore_warm_reset_vector();

        return boot_error;
}

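/* Arch hook behind cpu_up(): validate the target CPU, boot it, sync its TSC */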
int __cpuinit native_cpu_up(unsigned int cpu)
{
        int apicid = cpu_present_to_apicid(cpu);
        unsigned long flags;
        int err;

        WARN_ON(irqs_disabled());

        pr_debug("++++++++++++++++++++=_---CPU UP %u\n", cpu);

        if (apicid == BAD_APICID || apicid == boot_cpu_physical_apicid ||
            !physid_isset(apicid, phys_cpu_present_map)) {
                printk(KERN_ERR "%s: bad cpu %d\n", __func__, cpu);
                return -EINVAL;
        }

        if (cpu_isset(cpu, cpu_callin_map)) {
                pr_debug("do_boot_cpu %d Already started\n", cpu);
                return -ENOSYS;
        }

        mtrr_save_state();

        per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

#ifdef CONFIG_X86_32
        clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
                min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
        flush_tlb_all();
        low_mappings = 1;

        err = do_boot_cpu(apicid, cpu);

        zap_low_mappings();
        low_mappings = 0;
#else
        err = do_boot_cpu(apicid, cpu);
#endif
        if (err) {
                pr_debug("do_boot_cpu failed %d\n", err);
                return -EIO;
        }

        local_irq_save(flags);
        check_tsc_sync_source(cpu);
        local_irq_restore(flags);

        while (!cpu_online(cpu)) {
                cpu_relax();
                touch_nmi_watchdog();
        }

        return 0;
}

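/*
 * Fall back to non-SMP mode: reduce the CPU maps to the boot CPU only
 * and point its sibling/core maps at itself.
 */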
static __init void disable_smp(void)
{
        cpu_present_map = cpumask_of_cpu(0);
        cpu_possible_map = cpumask_of_cpu(0);
        smpboot_clear_io_apic_irqs();

        if (smp_found_config)
                physid_set_mask_of_physid(boot_cpu_physical_apicid,
                                          &phys_cpu_present_map);
        else
                physid_set_mask_of_physid(0, &phys_cpu_present_map);
        map_cpu_to_logical_apicid();
        cpu_set(0, per_cpu(cpu_sibling_map, 0));
        cpu_set(0, per_cpu(cpu_core_map, 0));
}

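/* Various sanity checks before trying to bring up secondary CPUs */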
static int __init smp_sanity_check(unsigned max_cpus)
{
        preempt_disable();

#if defined(CONFIG_X86_PC) && defined(CONFIG_X86_32)
        if (def_to_bigsmp && nr_cpu_ids > 8) {
                unsigned int cpu;
                unsigned nr;

                printk(KERN_WARNING
                       "More than 8 CPUs detected - skipping them.\n"
                       "Use CONFIG_X86_GENERICARCH and CONFIG_X86_BIGSMP.\n");

                nr = 0;
                for_each_present_cpu(cpu) {
                        if (nr >= 8)
                                cpu_clear(cpu, cpu_present_map);
                        nr++;
                }

                nr = 0;
                for_each_possible_cpu(cpu) {
                        if (nr >= 8)
                                cpu_clear(cpu, cpu_possible_map);
                        nr++;
                }

                nr_cpu_ids = 8;
        }
#endif

        if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
                printk(KERN_WARNING "weird, boot CPU (#%d) not listed "
                       "by the BIOS.\n", hard_smp_processor_id());
                physid_set(hard_smp_processor_id(), phys_cpu_present_map);
        }

        if (!smp_found_config && !acpi_lapic) {
                preempt_enable();
                printk(KERN_NOTICE "SMP motherboard not detected.\n");
                disable_smp();
                if (APIC_init_uniprocessor())
                        printk(KERN_NOTICE "Local APIC not detected."
                                           " Using dummy APIC emulation.\n");
                return -1;
        }

        if (!check_phys_apicid_present(boot_cpu_physical_apicid)) {
                printk(KERN_NOTICE
                        "weird, boot CPU (#%d) not listed by the BIOS.\n",
                        boot_cpu_physical_apicid);
                physid_set(hard_smp_processor_id(), phys_cpu_present_map);
        }
        preempt_enable();

        if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) &&
            !cpu_has_apic) {
                printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
                        boot_cpu_physical_apicid);
                printk(KERN_ERR "... forcing use of dummy APIC emulation. "
                                "(tell your hw vendor)\n");
                smpboot_clear_io_apic();
                return -1;
        }

        verify_local_APIC();

        if (!max_cpus) {
                printk(KERN_INFO "SMP mode deactivated.\n");
                smpboot_clear_io_apic();

                localise_nmi_watchdog();

                connect_bsp_APIC();
                setup_local_APIC();
                end_local_APIC_setup();
                return -1;
        }

        return 0;
}

static void __init smp_cpu_index_default(void)
{
        int i;
        struct cpuinfo_x86 *c;

        for_each_possible_cpu(i) {
                c = &cpu_data(i);
                c->cpu_index = NR_CPUS;
        }
}

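/*
 * Prepare for SMP bootup; max_cpus is the administrative limit on the
 * number of CPUs to bring up (e.g. from the "maxcpus=" boot option).
 */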
void __init native_smp_prepare_cpus(unsigned int max_cpus)
{
        preempt_disable();
        smp_cpu_index_default();
        current_cpu_data = boot_cpu_data;
        cpu_callin_map = cpumask_of_cpu(0);
        mb();

        smp_store_cpu_info(0);
#ifdef CONFIG_X86_32
        boot_cpu_logical_apicid = logical_smp_processor_id();
#endif
        current_thread_info()->cpu = 0;
        set_cpu_sibling_map(0);

#ifdef CONFIG_X86_64
        enable_IR_x2apic();
        setup_apic_routing();
#endif

        if (smp_sanity_check(max_cpus) < 0) {
                printk(KERN_INFO "SMP disabled\n");
                disable_smp();
                goto out;
        }

        preempt_disable();
        if (read_apic_id() != boot_cpu_physical_apicid) {
                panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
                     read_apic_id(), boot_cpu_physical_apicid);
        }
        preempt_enable();

        connect_bsp_APIC();

        setup_local_APIC();

#ifdef CONFIG_X86_64
        if (!skip_ioapic_setup && nr_ioapics)
                enable_IO_APIC();
#endif
        end_local_APIC_setup();

        map_cpu_to_logical_apicid();

        setup_portio_remap();

        smpboot_setup_io_apic();

        printk(KERN_INFO "CPU%d: ", 0);
        print_cpu_info(&cpu_data(0));
        setup_boot_clock();

        if (is_uv_system())
                uv_system_init();
out:
        preempt_enable();
}

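/* Early setup of the boot CPU: GDT and per-CPU boot state */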
void __init native_smp_prepare_boot_cpu(void)
{
        int me = smp_processor_id();
#ifdef CONFIG_X86_32
        init_gdt(me);
#endif
        switch_to_new_gdt();
        cpu_set(me, cpu_callout_map);
        per_cpu(cpu_state, me) = CPU_ONLINE;
}

void __init native_smp_cpus_done(unsigned int max_cpus)
{
        pr_debug("Boot done.\n");

        impress_friends();
        smp_checks();
#ifdef CONFIG_X86_IO_APIC
        setup_ioapic_dest();
#endif
        check_nmi_watchdog();
}

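/*
 * cpu_possible_map is effectively fixed once the kernel has booted:
 * per-cpu areas are allocated against it at init time, and callers do
 * not expect it to grow later.  So size it up front from the detected
 * processors plus any disabled (hotpluggable) ones.
 */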
__init void prefill_possible_map(void)
{
        int i, possible;

        if (!num_processors)
                num_processors = 1;

        possible = num_processors + disabled_cpus;
        if (possible > NR_CPUS)
                possible = NR_CPUS;

        printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
                possible, max_t(int, possible - num_processors, 0));

        for (i = 0; i < possible; i++)
                cpu_set(i, cpu_possible_map);

        nr_cpu_ids = possible;
}

#ifdef CONFIG_HOTPLUG_CPU

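/* Unhook @cpu from the sibling/core maps and fix up booted_cores counts */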
static void remove_siblinginfo(int cpu)
{
        int sibling;
        struct cpuinfo_x86 *c = &cpu_data(cpu);

        for_each_cpu_mask_nr(sibling, per_cpu(cpu_core_map, cpu)) {
                cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
                if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
                        cpu_data(sibling).booted_cores--;
        }

        for_each_cpu_mask_nr(sibling, per_cpu(cpu_sibling_map, cpu))
                cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
        cpus_clear(per_cpu(cpu_sibling_map, cpu));
        cpus_clear(per_cpu(cpu_core_map, cpu));
        c->phys_proc_id = 0;
        c->cpu_core_id = 0;
        cpu_clear(cpu, cpu_sibling_setup_map);
}

static void __ref remove_cpu_from_maps(int cpu)
{
        cpu_clear(cpu, cpu_online_map);
        cpu_clear(cpu, cpu_callout_map);
        cpu_clear(cpu, cpu_callin_map);
        cpu_clear(cpu, cpu_initialized);
        numa_remove_cpu(cpu);
}

void cpu_disable_common(void)
{
        int cpu = smp_processor_id();

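        /*
         * Briefly re-enable interrupts so any queued (e.g. timer)
         * interrupts get serviced before this CPU drops out of the
         * online map.
         */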
        local_irq_enable();
        mdelay(1);
        local_irq_disable();

        remove_siblinginfo(cpu);

        lock_vector_lock();
        remove_cpu_from_maps(cpu);
        unlock_vector_lock();
        fixup_irqs(cpu_online_map);
}

int native_cpu_disable(void)
{
        int cpu = smp_processor_id();

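        /*
         * The boot CPU cannot be offlined here: some interrupts can
         * only be serviced by the BSP (especially without an IO-APIC).
         */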
        if (cpu == 0)
                return -EBUSY;

        if (nmi_watchdog == NMI_LOCAL_APIC)
                stop_apic_nmi_watchdog(NULL);
        clear_local_APIC();

        cpu_disable_common();
        return 0;
}

void native_cpu_die(unsigned int cpu)
{
        unsigned int i;

        for (i = 0; i < 10; i++) {
                if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
                        printk(KERN_INFO "CPU %d is now offline\n", cpu);
                        if (1 == num_online_cpus())
                                alternatives_smp_switch(0);
                        return;
                }
                msleep(100);
        }
        printk(KERN_ERR "CPU %u didn't die...\n", cpu);
}

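/* Common teardown run on a dying CPU before it finally halts */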
void play_dead_common(void)
{
        idle_task_exit();
        reset_lazy_tlbstate();
        irq_ctx_exit(raw_smp_processor_id());
        c1e_remove_cpu(raw_smp_processor_id());

        mb();
        __get_cpu_var(cpu_state) = CPU_DEAD;

        local_irq_disable();
}

void native_play_dead(void)
{
        play_dead_common();
        wbinvd_halt();
}

#else
int native_cpu_disable(void)
{
        return -ENOSYS;
}

void native_cpu_die(unsigned int cpu)
{
        BUG();
}

void native_play_dead(void)
{
        BUG();
}

#endif