x86-64: Move kernelstack from PDA to per-cpu.
author	Brian Gerst <brgerst@gmail.com>
Sun, 18 Jan 2009 15:38:58 +0000 (00:38 +0900)
committer	Tejun Heo <tj@kernel.org>
Sun, 18 Jan 2009 15:38:58 +0000 (00:38 +0900)
Also clean up PER_CPU_VAR usage in xen-asm_64.S

tj: * remove now unused stack_thread_info()
    * s/kernelstack/kernel_stack/
    * added FIXME comment in xen-asm_64.S

Signed-off-by: Brian Gerst <brgerst@gmail.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
arch/x86/ia32/ia32entry.S
arch/x86/include/asm/pda.h
arch/x86/include/asm/thread_info.h
arch/x86/kernel/asm-offsets_64.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/entry_64.S
arch/x86/kernel/process_64.c
arch/x86/kernel/smpboot.c
arch/x86/xen/xen-asm_64.S
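
(For reference only, not part of the patch: the PER_CPU_VAR() accessor used
throughout this change expands, roughly as in the x86 <asm/percpu.h> of this
period, to a segment-relative operand, %gs on x86-64:

    #ifdef CONFIG_SMP
    #define PER_CPU_VAR(var)    %__percpu_seg:per_cpu__##var
    #else
    #define PER_CPU_VAR(var)    per_cpu__##var
    #endif

so "movq PER_CPU_VAR(kernel_stack), %rsp" reads the current CPU's kernel_stack
directly, just as "movq %gs:pda_kernelstack, %rsp" read the field out of the
PDA.)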

index 256b00b61892b2e3049ef8ff65a672df701efc77..9c79b247700801e6467b5629e7ee757f09a44b7e 100644 (file)
@@ -112,8 +112,8 @@ ENTRY(ia32_sysenter_target)
        CFI_DEF_CFA     rsp,0
        CFI_REGISTER    rsp,rbp
        SWAPGS_UNSAFE_STACK
-       movq    %gs:pda_kernelstack, %rsp
-       addq    $(PDA_STACKOFFSET),%rsp 
+       movq    PER_CPU_VAR(kernel_stack), %rsp
+       addq    $(KERNEL_STACK_OFFSET),%rsp
        /*
         * No need to follow this irqs on/off section: the syscall
         * disabled irqs, here we enable it straight after entry:
@@ -273,13 +273,13 @@ ENDPROC(ia32_sysenter_target)
 ENTRY(ia32_cstar_target)
        CFI_STARTPROC32 simple
        CFI_SIGNAL_FRAME
-       CFI_DEF_CFA     rsp,PDA_STACKOFFSET
+       CFI_DEF_CFA     rsp,KERNEL_STACK_OFFSET
        CFI_REGISTER    rip,rcx
        /*CFI_REGISTER  rflags,r11*/
        SWAPGS_UNSAFE_STACK
        movl    %esp,%r8d
        CFI_REGISTER    rsp,r8
-       movq    %gs:pda_kernelstack,%rsp
+       movq    PER_CPU_VAR(kernel_stack),%rsp
        /*
         * No need to follow this irqs on/off section: the syscall
         * disabled irqs and here we enable it straight after entry:
index 7209302d922756fa20c4c7a24925d9a045350fd5..4d28ffba6e1b6c7e86224ec26cdf79214254a162 100644 (file)
@@ -13,7 +13,7 @@
 struct x8664_pda {
        unsigned long unused1;
        unsigned long unused2;
-       unsigned long kernelstack;      /* 16 top of kernel stack for current */
+       unsigned long unused3;
        unsigned long oldrsp;           /* 24 user rsp for system call */
        int irqcount;                   /* 32 Irq nesting counter. Starts -1 */
        unsigned int unused6;           /* 36 was cpunumber */
@@ -44,6 +44,4 @@ extern void pda_init(int);
 
 #endif
 
-#define PDA_STACKOFFSET (5*8)
-
 #endif /* _ASM_X86_PDA_H */
index 98789647baa9b8a6d1e414f9e40c95c70f704a11..b46f8ca007b5754ce1282809d6a7e8669104ed81 100644 (file)
@@ -194,25 +194,21 @@ static inline struct thread_info *current_thread_info(void)
 
 #else /* X86_32 */
 
-#include <asm/pda.h>
+#include <asm/percpu.h>
+#define KERNEL_STACK_OFFSET (5*8)
 
 /*
  * macros/functions for gaining access to the thread information structure
  * preempt_count needs to be 1 initially, until the scheduler is functional.
  */
 #ifndef __ASSEMBLY__
-static inline struct thread_info *current_thread_info(void)
-{
-       struct thread_info *ti;
-       ti = (void *)(read_pda(kernelstack) + PDA_STACKOFFSET - THREAD_SIZE);
-       return ti;
-}
+DECLARE_PER_CPU(unsigned long, kernel_stack);
 
-/* do not use in interrupt context */
-static inline struct thread_info *stack_thread_info(void)
+static inline struct thread_info *current_thread_info(void)
 {
        struct thread_info *ti;
-       asm("andq %%rsp,%0; " : "=r" (ti) : "0" (~(THREAD_SIZE - 1)));
+       ti = (void *)(percpu_read(kernel_stack) +
+                     KERNEL_STACK_OFFSET - THREAD_SIZE);
        return ti;
 }
 
@@ -220,8 +216,8 @@ static inline struct thread_info *stack_thread_info(void)
 
 /* how to get the thread information struct from ASM */
 #define GET_THREAD_INFO(reg) \
-       movq %gs:pda_kernelstack,reg ; \
-       subq $(THREAD_SIZE-PDA_STACKOFFSET),reg
+       movq PER_CPU_VAR(kernel_stack),reg ; \
+       subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
 
 #endif
 
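Sketch (not part of the patch): with the values set up in process_64.c and
smpboot.c below, the offsets cancel so that GET_THREAD_INFO() and
current_thread_info() land on the base of the task's stack, where struct
thread_info lives in this era; KERNEL_STACK_OFFSET is understood to leave
room for the five pt_regs slots the SYSCALL fast path does not push:

    kernel_stack = task_stack_page + THREAD_SIZE - KERNEL_STACK_OFFSET
    thread_info  = kernel_stack + KERNEL_STACK_OFFSET - THREAD_SIZE
                 = task_stack_page
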
index 4f7a210e1e58c915d6a05a46f3629b62b0919c77..cafff5f4a031d95f16697f93e318a202c482f92a 100644 (file)
@@ -49,7 +49,6 @@ int main(void)
        BLANK();
 #undef ENTRY
 #define ENTRY(entry) DEFINE(pda_ ## entry, offsetof(struct x8664_pda, entry))
-       ENTRY(kernelstack); 
        ENTRY(oldrsp); 
        ENTRY(irqcount);
        DEFINE(pda_size, sizeof(struct x8664_pda));
index b50e38d168881f200bb0dd6bfc690908c52cc849..06b6290088f40dc4618033f788da173688ca4265 100644 (file)
@@ -889,6 +889,10 @@ DEFINE_PER_CPU(char *, irq_stack_ptr) =
        per_cpu_var(irq_stack) + IRQ_STACK_SIZE - 64;
 #endif
 
+DEFINE_PER_CPU(unsigned long, kernel_stack) =
+       (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
+EXPORT_PER_CPU_SYMBOL(kernel_stack);
+
 void __cpuinit pda_init(int cpu)
 {
        struct x8664_pda *pda = cpu_pda(cpu);
@@ -900,8 +904,6 @@ void __cpuinit pda_init(int cpu)
        load_pda_offset(cpu);
 
        pda->irqcount = -1;
-       pda->kernelstack = (unsigned long)stack_thread_info() -
-                                PDA_STACKOFFSET + THREAD_SIZE;
 
        if (cpu != 0) {
                if (pda->nodenumber == 0 && cpu_to_node(cpu) != NUMA_NO_NODE)
index d22677a66438ebe7e2eeb2ddb13a6d8d4ae45c7e..0dd45859a7a863c64e95b62ed1891a8738f13f0c 100644 (file)
@@ -468,7 +468,7 @@ END(ret_from_fork)
 ENTRY(system_call)
        CFI_STARTPROC   simple
        CFI_SIGNAL_FRAME
-       CFI_DEF_CFA     rsp,PDA_STACKOFFSET
+       CFI_DEF_CFA     rsp,KERNEL_STACK_OFFSET
        CFI_REGISTER    rip,rcx
        /*CFI_REGISTER  rflags,r11*/
        SWAPGS_UNSAFE_STACK
@@ -480,7 +480,7 @@ ENTRY(system_call)
 ENTRY(system_call_after_swapgs)
 
        movq    %rsp,%gs:pda_oldrsp
-       movq    %gs:pda_kernelstack,%rsp
+       movq    PER_CPU_VAR(kernel_stack),%rsp
        /*
         * No need to follow this irqs off/on section - it's straight
         * and short:
index e00c31a4b3c0cbcf793b6258ddd13908b6590aef..6c5f576021088d023f7161e090ddb4f8323478ce 100644 (file)
@@ -620,9 +620,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
        write_pda(oldrsp, next->usersp);
        percpu_write(current_task, next_p);
 
-       write_pda(kernelstack,
+       percpu_write(kernel_stack,
                  (unsigned long)task_stack_page(next_p) +
-                 THREAD_SIZE - PDA_STACKOFFSET);
+                 THREAD_SIZE - KERNEL_STACK_OFFSET);
 #ifdef CONFIG_CC_STACKPROTECTOR
        write_pda(stack_canary, next_p->stack_canary);
        /*
index 5854be0fb804ecb4f82c20af635b26c820045a9a..869b98840fd06feefb6c69986767921a2392ea49 100644 (file)
@@ -798,6 +798,9 @@ do_rest:
 #else
        clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
        initial_gs = per_cpu_offset(cpu);
+       per_cpu(kernel_stack, cpu) =
+               (unsigned long)task_stack_page(c_idle.idle) -
+               KERNEL_STACK_OFFSET + THREAD_SIZE;
 #endif
        early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
        initial_code = (unsigned long)start_secondary;
index 05794c566e8799382f9a314970661d5ab232e4a8..5a23e89936784fd7c70b9eb414f9b90ae9beb829 100644 (file)
@@ -17,6 +17,7 @@
 #include <asm/processor-flags.h>
 #include <asm/errno.h>
 #include <asm/segment.h>
+#include <asm/percpu.h>
 
 #include <xen/interface/xen.h>
 
 
 #if 1
 /*
-       x86-64 does not yet support direct access to percpu variables
-       via a segment override, so we just need to make sure this code
-       never gets used
+       FIXME: x86_64 now can support direct access to percpu variables
+       via a segment override.  Update xen accordingly.
  */
 #define BUG                    ud2a
-#define PER_CPU_VAR(var, off)  0xdeadbeef
 #endif
 
 /*
@@ -45,14 +44,14 @@ ENTRY(xen_irq_enable_direct)
        BUG
 
        /* Unmask events */
-       movb $0, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
+       movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
 
        /* Preempt here doesn't matter because that will deal with
           any pending interrupts.  The pending check may end up being
           run on the wrong CPU, but that doesn't hurt. */
 
        /* Test for pending */
-       testb $0xff, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_pending)
+       testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
        jz 1f
 
 2:     call check_events
@@ -69,7 +68,7 @@ ENDPATCH(xen_irq_enable_direct)
 ENTRY(xen_irq_disable_direct)
        BUG
 
-       movb $1, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
+       movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
 ENDPATCH(xen_irq_disable_direct)
        ret
        ENDPROC(xen_irq_disable_direct)
@@ -87,7 +86,7 @@ ENDPATCH(xen_irq_disable_direct)
 ENTRY(xen_save_fl_direct)
        BUG
 
-       testb $0xff, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
+       testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
        setz %ah
        addb %ah,%ah
 ENDPATCH(xen_save_fl_direct)
@@ -107,13 +106,13 @@ ENTRY(xen_restore_fl_direct)
        BUG
 
        testb $X86_EFLAGS_IF>>8, %ah
-       setz PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
+       setz PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
        /* Preempt here doesn't matter because that will deal with
           any pending interrupts.  The pending check may end up being
           run on the wrong CPU, but that doesn't hurt. */
 
        /* check for unmasked and pending */
-       cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_pending)
+       cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
        jz 1f
 2:     call check_events
 1:
@@ -196,7 +195,7 @@ ENTRY(xen_sysret64)
        /* We're already on the usermode stack at this point, but still
           with the kernel gs, so we can easily switch back */
        movq %rsp, %gs:pda_oldrsp
-       movq %gs:pda_kernelstack,%rsp
+       movq PER_CPU_VAR(kernel_stack),%rsp
 
        pushq $__USER_DS
        pushq %gs:pda_oldrsp
@@ -213,7 +212,7 @@ ENTRY(xen_sysret32)
        /* We're already on the usermode stack at this point, but still
           with the kernel gs, so we can easily switch back */
        movq %rsp, %gs:pda_oldrsp
-       movq %gs:pda_kernelstack, %rsp
+       movq PER_CPU_VAR(kernel_stack), %rsp
 
        pushq $__USER32_DS
        pushq %gs:pda_oldrsp
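
Illustrative expansion, assuming the SMP PER_CPU_VAR() definition sketched
above (not part of the patch): with the real single-argument accessor, the old
local two-argument stub, which expanded to 0xdeadbeef because segment-based
per-cpu access was not yet available, is dropped, and field offsets become
explicit displacements:

    testb $0xff, %gs:per_cpu__xen_vcpu_info + XEN_vcpu_info_mask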