x86: add cpu hotplug hooks into smp_ops
author Alex Nixon <alex.nixon@citrix.com>
Fri, 22 Aug 2008 10:52:11 +0000 (11:52 +0100)
committer Ingo Molnar <mingo@elte.hu>
Mon, 25 Aug 2008 08:59:18 +0000 (10:59 +0200)
Signed-off-by: Alex Nixon <alex.nixon@citrix.com>
Acked-by: Jeremy Fitzhardinge <jeremy@goop.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
arch/x86/kernel/process_32.c
arch/x86/kernel/process_64.c
arch/x86/kernel/smp.c
arch/x86/kernel/smpboot.c
include/asm-x86/smp.h

diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 3b7a1ddcc0bce7eca8c989906fa377d4f0322f50..e382fe0ccd663cd4cb40a36b3dd2512fe4cc915f 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -91,7 +91,7 @@ static void cpu_exit_clear(void)
 }
 
 /* We don't actually take CPU down, just spin without interrupts. */
-static inline void play_dead(void)
+void native_play_dead(void)
 {
        /* This must be done before dead CPU ack */
        cpu_exit_clear();
@@ -107,7 +107,7 @@ static inline void play_dead(void)
        wbinvd_halt();
 }
 #else
-static inline void play_dead(void)
+void native_play_dead(void)
 {
        BUG();
 }
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 71553b664e2af8601361a9af3413599b752a3e5c..dfd3f575208581f640bd74e998e1042c24d0deb0 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -90,7 +90,7 @@ DECLARE_PER_CPU(int, cpu_state);
 
 #include <asm/nmi.h>
 /* We halt the CPU with physical CPU hotplug */
-static inline void play_dead(void)
+void native_play_dead(void)
 {
        idle_task_exit();
        mb();
@@ -102,7 +102,7 @@ static inline void play_dead(void)
        wbinvd_halt();
 }
 #else
-static inline void play_dead(void)
+void native_play_dead(void)
 {
        BUG();
 }
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 361b7a4c640c2d9efaee2d95f860bc83d985a2a5..18f9b19f5f8f5d46582b64501d83d11cb4376eb6 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -214,12 +214,16 @@ void smp_call_function_single_interrupt(struct pt_regs *regs)
 struct smp_ops smp_ops = {
        .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
        .smp_prepare_cpus = native_smp_prepare_cpus,
-       .cpu_up = native_cpu_up,
        .smp_cpus_done = native_smp_cpus_done,
 
        .smp_send_stop = native_smp_send_stop,
        .smp_send_reschedule = native_smp_send_reschedule,
 
+       .cpu_up = native_cpu_up,
+       .cpu_die = native_cpu_die,
+       .cpu_disable = native_cpu_disable,
+       .play_dead = native_play_dead,
+
        .send_call_func_ipi = native_send_call_func_ipi,
        .send_call_func_single_ipi = native_send_call_func_single_ipi,
 };
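
With the hunk above, the CPU hotplug entry points are reached through smp_ops rather than called directly, so an alternative SMP backend (for example a paravirtualized one) can supply its own hotplug callbacks. A minimal sketch of such an override, assuming only the interface added by this patch; the pv_* names below are hypothetical and not part of this commit:

/*
 * Illustrative sketch only -- not part of this commit.  An alternative
 * SMP backend could install its own hotplug callbacks in place of the
 * native_* ones.
 */
#include <asm/smp.h>

static int pv_cpu_disable(void)
{
	/* e.g. stop routing interrupts and work to this virtual CPU */
	return 0;
}

static void pv_cpu_die(unsigned int cpu)
{
	/* e.g. wait for the hypervisor to confirm the vCPU is torn down */
}

static void pv_play_dead(void)
{
	/* e.g. park the vCPU in the hypervisor instead of wbinvd_halt() */
}

static const struct smp_ops pv_smp_ops = {
	/* ... non-hotplug callbacks as in the native smp_ops above ... */
	.cpu_up		= native_cpu_up,
	.cpu_disable	= pv_cpu_disable,
	.cpu_die	= pv_cpu_die,
	.play_dead	= pv_play_dead,
};

A backend would then take over early in boot by copying its ops structure into the global, e.g. smp_ops = pv_smp_ops;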
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 7985c5b3f9162ba14c0817ce25f1297b34a87a5c..c414cee296ba0f8222dc19c60136a4d7a7d9bfd4 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1346,7 +1346,7 @@ static void __ref remove_cpu_from_maps(int cpu)
        numa_remove_cpu(cpu);
 }
 
-int __cpu_disable(void)
+int native_cpu_disable(void)
 {
        int cpu = smp_processor_id();
 
@@ -1385,7 +1385,7 @@ int __cpu_disable(void)
        return 0;
 }
 
-void __cpu_die(unsigned int cpu)
+void native_cpu_die(unsigned int cpu)
 {
        /* We don't do anything here: idle task is faking death itself. */
        unsigned int i;
@@ -1403,12 +1403,12 @@ void __cpu_die(unsigned int cpu)
        printk(KERN_ERR "CPU %u didn't die...\n", cpu);
 }
 #else /* ... !CONFIG_HOTPLUG_CPU */
-int __cpu_disable(void)
+int native_cpu_disable(void)
 {
        return -ENOSYS;
 }
 
-void __cpu_die(unsigned int cpu)
+void native_cpu_die(unsigned int cpu)
 {
        /* We said "no" in __cpu_disable */
        BUG();
diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h
index 3c877f74f279454cd579cf71530ef3bd051b75ff..dbf4249e2a6d7a73c08de6d39cc2b9eb7a540542 100644
--- a/include/asm-x86/smp.h
+++ b/include/asm-x86/smp.h
@@ -47,12 +47,16 @@ extern struct {
 struct smp_ops {
        void (*smp_prepare_boot_cpu)(void);
        void (*smp_prepare_cpus)(unsigned max_cpus);
-       int (*cpu_up)(unsigned cpu);
        void (*smp_cpus_done)(unsigned max_cpus);
 
        void (*smp_send_stop)(void);
        void (*smp_send_reschedule)(int cpu);
 
+       int (*cpu_up)(unsigned cpu);
+       int (*cpu_disable)(void);
+       void (*cpu_die)(unsigned int cpu);
+       void (*play_dead)(void);
+
        void (*send_call_func_ipi)(cpumask_t mask);
        void (*send_call_func_single_ipi)(int cpu);
 };
@@ -91,6 +95,21 @@ static inline int __cpu_up(unsigned int cpu)
        return smp_ops.cpu_up(cpu);
 }
 
+static inline int __cpu_disable(void)
+{
+       return smp_ops.cpu_disable();
+}
+
+static inline void __cpu_die(unsigned int cpu)
+{
+       smp_ops.cpu_die(cpu);
+}
+
+static inline void play_dead(void)
+{
+       smp_ops.play_dead();
+}
+
 static inline void smp_send_reschedule(int cpu)
 {
        smp_ops.smp_send_reschedule(cpu);
@@ -110,12 +129,13 @@ void native_smp_prepare_boot_cpu(void);
 void native_smp_prepare_cpus(unsigned int max_cpus);
 void native_smp_cpus_done(unsigned int max_cpus);
 int native_cpu_up(unsigned int cpunum);
+int native_cpu_disable(void);
+void native_cpu_die(unsigned int cpu);
+void native_play_dead(void);
+
 void native_send_call_func_ipi(cpumask_t mask);
 void native_send_call_func_single_ipi(int cpu);
 
-extern int __cpu_disable(void);
-extern void __cpu_die(unsigned int cpu);
-
 void smp_store_cpu_info(int id);
 #define cpu_physical_id(cpu)   per_cpu(x86_cpu_to_apicid, cpu)
 
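For reference, a simplified sketch of who ends up calling the new wrappers after this change; the example_* functions below are illustrative stand-ins for the generic hotplug code and the arch idle loop, not actual kernel code:

/*
 * Simplified, illustrative call flow -- shows only how callers reach
 * the smp_ops hotplug hooks through the new inline wrappers.
 */
#include <asm/smp.h>

/* Controlling CPU: roughly what generic hotplug code does. */
static int example_offline_cpu(unsigned int cpu)
{
	int err = __cpu_disable();	/* dispatches to smp_ops.cpu_disable() */

	if (err)
		return err;

	__cpu_die(cpu);			/* dispatches to smp_ops.cpu_die(cpu) */
	return 0;
}

/* Dying CPU: roughly what the arch idle loop does once it is offline. */
static void example_idle_when_offline(void)
{
	play_dead();			/* dispatches to smp_ops.play_dead() */
}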