x86: merge init_task_32/64.c
author     Hiroshi Shimamoto <h-shimamoto@ct.jp.nec.com>
           Fri, 19 Oct 2007 18:35:02 +0000 (20:35 +0200)
committer  Thomas Gleixner <tglx@linutronix.de>
           Fri, 19 Oct 2007 18:35:02 +0000 (20:35 +0200)
Merge init_task_32.c and init_task_64.c into a single init_task.c.
Move the 64-bit per-CPU data orig_ist to setup64.c.

[ mingo: fixed checkpatch trivialities. ]

Signed-off-by: Hiroshi Shimamoto <h-shimamoto@ct.jp.nec.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
arch/i386/Makefile
arch/x86/kernel/Makefile_32
arch/x86/kernel/Makefile_64
arch/x86/kernel/init_task.c [new file with mode: 0644]
arch/x86/kernel/init_task_32.c [deleted file]
arch/x86/kernel/init_task_64.c [deleted file]
arch/x86/kernel/setup64.c
arch/x86_64/Makefile

diff --git a/arch/i386/Makefile b/arch/i386/Makefile
index d82f11de60ce5432d36a0fffbdff0bc3a71a9914..b88e47ca3032e8efc7f10dddebc3d12eb87e5f84 100644
--- a/arch/i386/Makefile
+++ b/arch/i386/Makefile
@@ -102,7 +102,7 @@ core-$(CONFIG_XEN)          += arch/x86/xen/
 # default subarch .h files
 mflags-y += -Iinclude/asm-x86/mach-default
 
-head-y := arch/x86/kernel/head_32.o arch/x86/kernel/init_task_32.o
+head-y := arch/x86/kernel/head_32.o arch/x86/kernel/init_task.o
 
 libs-y                                         += arch/x86/lib/
 core-y                                 += arch/x86/kernel/ \
diff --git a/arch/x86/kernel/Makefile_32 b/arch/x86/kernel/Makefile_32
index a3fa11f8f4609f6744cba78d95cd829dd617a4ea..69990f0ba86e3cbc1e84346c0b71988f4a23191a 100644
--- a/arch/x86/kernel/Makefile_32
+++ b/arch/x86/kernel/Makefile_32
@@ -2,7 +2,7 @@
 # Makefile for the linux kernel.
 #
 
-extra-y := head_32.o init_task_32.o vmlinux.lds
+extra-y := head_32.o init_task.o vmlinux.lds
 
 obj-y  := process_32.o signal_32.o entry_32.o traps_32.o irq_32.o \
                ptrace_32.o time_32.o ioport_32.o ldt_32.o setup_32.o i8259_32.o sys_i386_32.o \
diff --git a/arch/x86/kernel/Makefile_64 b/arch/x86/kernel/Makefile_64
index 43da66213a479117a91c05f62e4b67447d412205..2fc3d3f2c27eb530b8bf81317e517b6b0da12219 100644
--- a/arch/x86/kernel/Makefile_64
+++ b/arch/x86/kernel/Makefile_64
@@ -2,7 +2,7 @@
 # Makefile for the linux kernel.
 #
 
-extra-y        := head_64.o head64.o init_task_64.o vmlinux.lds
+extra-y        := head_64.o head64.o init_task.o vmlinux.lds
 EXTRA_AFLAGS   := -traditional
 obj-y  := process_64.o signal_64.o entry_64.o traps_64.o irq_64.o \
                ptrace_64.o time_64.o ioport_64.o ldt_64.o setup_64.o i8259_64.o sys_x86_64.o \
diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
new file mode 100644
index 0000000..468c9c4
--- /dev/null
+++ b/arch/x86/kernel/init_task.c
@@ -0,0 +1,47 @@
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/init_task.h>
+#include <linux/fs.h>
+#include <linux/mqueue.h>
+
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/desc.h>
+
+static struct fs_struct init_fs = INIT_FS;
+static struct files_struct init_files = INIT_FILES;
+static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
+static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
+struct mm_struct init_mm = INIT_MM(init_mm);
+EXPORT_SYMBOL(init_mm);
+
+/*
+ * Initial thread structure.
+ *
+ * We need to make sure that this is THREAD_SIZE aligned due to the
+ * way process stacks are handled. This is done by having a special
+ * "init_task" linker map entry..
+ */
+union thread_union init_thread_union
+       __attribute__((__section__(".data.init_task"))) =
+               { INIT_THREAD_INFO(init_task) };
+
+/*
+ * Initial task structure.
+ *
+ * All other task structs will be allocated on slabs in fork.c
+ */
+struct task_struct init_task = INIT_TASK(init_task);
+EXPORT_SYMBOL(init_task);
+
+/*
+ * per-CPU TSS segments. Threads are completely 'soft' on Linux,
+ * no more per-task TSS's. The TSS size is kept cacheline-aligned
+ * so they are allowed to end up in the .data.cacheline_aligned
+ * section. Since TSS's are completely CPU-local, we want them
+ * on exact cacheline boundaries, to eliminate cacheline ping-pong.
+ */
+DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
+
diff --git a/arch/x86/kernel/init_task_32.c b/arch/x86/kernel/init_task_32.c
deleted file mode 100644
index d26fc06..0000000
--- a/arch/x86/kernel/init_task_32.c
+++ /dev/null
@@ -1,46 +0,0 @@
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <linux/init_task.h>
-#include <linux/fs.h>
-#include <linux/mqueue.h>
-
-#include <asm/uaccess.h>
-#include <asm/pgtable.h>
-#include <asm/desc.h>
-
-static struct fs_struct init_fs = INIT_FS;
-static struct files_struct init_files = INIT_FILES;
-static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
-static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
-
-EXPORT_SYMBOL(init_mm);
-
-/*
- * Initial thread structure.
- *
- * We need to make sure that this is THREAD_SIZE aligned due to the
- * way process stacks are handled. This is done by having a special
- * "init_task" linker map entry..
- */
-union thread_union init_thread_union 
-       __attribute__((__section__(".data.init_task"))) =
-               { INIT_THREAD_INFO(init_task) };
-
-/*
- * Initial task structure.
- *
- * All other task structs will be allocated on slabs in fork.c
- */
-struct task_struct init_task = INIT_TASK(init_task);
-
-EXPORT_SYMBOL(init_task);
-
-/*
- * per-CPU TSS segments. Threads are completely 'soft' on Linux,
- * no more per-task TSS's.
- */ 
-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
-
diff --git a/arch/x86/kernel/init_task_64.c b/arch/x86/kernel/init_task_64.c
deleted file mode 100644
index 4ff33d4..0000000
--- a/arch/x86/kernel/init_task_64.c
+++ /dev/null
@@ -1,54 +0,0 @@
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <linux/init_task.h>
-#include <linux/fs.h>
-#include <linux/mqueue.h>
-
-#include <asm/uaccess.h>
-#include <asm/pgtable.h>
-#include <asm/desc.h>
-
-static struct fs_struct init_fs = INIT_FS;
-static struct files_struct init_files = INIT_FILES;
-static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
-static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
-
-EXPORT_SYMBOL(init_mm);
-
-/*
- * Initial task structure.
- *
- * We need to make sure that this is 8192-byte aligned due to the
- * way process stacks are handled. This is done by having a special
- * "init_task" linker map entry..
- */
-union thread_union init_thread_union 
-       __attribute__((__section__(".data.init_task"))) =
-               { INIT_THREAD_INFO(init_task) };
-
-/*
- * Initial task structure.
- *
- * All other task structs will be allocated on slabs in fork.c
- */
-struct task_struct init_task = INIT_TASK(init_task);
-
-EXPORT_SYMBOL(init_task);
-/*
- * per-CPU TSS segments. Threads are completely 'soft' on Linux,
- * no more per-task TSS's. The TSS size is kept cacheline-aligned
- * so they are allowed to end up in the .data.cacheline_aligned
- * section. Since TSS's are completely CPU-local, we want them
- * on exact cacheline boundaries, to eliminate cacheline ping-pong.
- */ 
-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
-
-/* Copies of the original ist values from the tss are only accessed during
- * debugging, no special alignment required.
- */
-DEFINE_PER_CPU(struct orig_ist, orig_ist);
-
-#define ALIGN_TO_4K __attribute__((section(".data.init_task")))
diff --git a/arch/x86/kernel/setup64.c b/arch/x86/kernel/setup64.c
index ba9188235057b17a6002a10c36493646aa747da8..e11886e8d8ca2f359e6dc2c062d411c74317db77 100644
--- a/arch/x86/kernel/setup64.c
+++ b/arch/x86/kernel/setup64.c
@@ -184,6 +184,12 @@ void __cpuinit check_efer(void)
 
 unsigned long kernel_eflags;
 
+/*
+ * Copies of the original ist values from the tss are only accessed during
+ * debugging, no special alignment required.
+ */
+DEFINE_PER_CPU(struct orig_ist, orig_ist);
+
 /*
  * cpu_init() initializes state that is per-CPU. Some data is already
  * initialized (naturally) in the bootstrap process, such as the GDT
diff --git a/arch/x86_64/Makefile b/arch/x86_64/Makefile
index f48d862397fb29488b485c6b07a9fc44e8ce9f2c..6d89ab762ffcf759b2dc6d344babe73ce056a239 100644
--- a/arch/x86_64/Makefile
+++ b/arch/x86_64/Makefile
@@ -74,7 +74,7 @@ KBUILD_CFLAGS += $(cflags-y)
 CFLAGS_KERNEL += $(cflags-kernel-y)
 KBUILD_AFLAGS += -m64
 
-head-y := arch/x86/kernel/head_64.o arch/x86/kernel/head64.o arch/x86/kernel/init_task_64.o
+head-y := arch/x86/kernel/head_64.o arch/x86/kernel/head64.o arch/x86/kernel/init_task.o
 
 libs-y                                         += arch/x86/lib/
 core-y                                 += arch/x86/kernel/ \