Merge ssh://master.kernel.org/pub/scm/linux/kernel/git/tglx/linux-2.6-x86
author    Linus Torvalds <torvalds@woody.linux-foundation.org>
          Wed, 17 Oct 2007 20:13:16 +0000 (13:13 -0700)
committer Linus Torvalds <torvalds@woody.linux-foundation.org>
          Wed, 17 Oct 2007 20:13:16 +0000 (13:13 -0700)
* ssh://master.kernel.org/pub/scm/linux/kernel/git/tglx/linux-2.6-x86: (114 commits)
  x86: delete vsyscall files during make clean
  kbuild: fix typo SRCARCH in find_sources
  x86: fix kernel rebuild due to vsyscall fallout
  .gitignore update for x86 arch
  x86: unify include/asm/debugreg_32/64.h
  x86: unify include/asm/unwind_32/64.h
  x86: unify include/asm/types_32/64.h
  x86: unify include/asm/tlb_32/64.h
  x86: unify include/asm/siginfo_32/64.h
  x86: unify include/asm/bug_32/64.h
  x86: unify include/asm/mman_32/64.h
  x86: unify include/asm/agp_32/64.h
  x86: unify include/asm/kdebug_32/64.h
  x86: unify include/asm/ioctls_32/64.h
  x86: unify include/asm/floppy_32/64.h
  x86: apply missing DMA/OOM prevention to floppy_32.h
  x86: unify include/asm/cache_32/64.h
  x86: unify include/asm/dmi_32/64.h
  x86: unify include/asm/delay_32/64.h
  ...

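Most of the series above collapses per-word-size headers into single files. A minimal standalone sketch of that unification pattern (a sketch only: CONFIG_X86_32 is the real kernel config symbol, while arch_word_t is an invented example type, not anything from the actual headers):

/*
 * Illustrative sketch of the "_32/64 unification" pattern: the two
 * per-word-size headers merge into one file whose few remaining
 * differences sit behind a word-size #ifdef.  arch_word_t is an
 * invented example type, not taken from the kernel.
 */
#include <stdio.h>

#ifdef CONFIG_X86_32
typedef unsigned int arch_word_t;		/* 32-bit flavour */
#else
typedef unsigned long long arch_word_t;		/* 64-bit flavour */
#endif

int main(void)
{
	printf("sizeof(arch_word_t) = %zu\n", sizeof(arch_word_t));
	return 0;
}

Built with -DCONFIG_X86_32 this prints 4, without it 8; one file serves both builds, which is the point of the unify commits.
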
arch/x86/kernel/alternative.c
arch/x86/mm/init_32.c
arch/x86/xen/smp.c

index 42421437ded310a4ea8bb75239639e274427c1c6,a3ae8e6c8b3b4a14c7ee8312d6b591df864b50f6..3bd2688bd4432a9cab554d6c665e1ceb64e9d410
@@@ -63,11 -63,11 +63,11 @@@ __setup("noreplace-paravirt", setup_nor
  /* Use inline assembly to define this because the nops are defined
     as inline assembly strings in the include files and we cannot
     get them easily into strings. */
- asm("\t.data\nintelnops: "
+ asm("\t.section .rodata, \"a\"\nintelnops: "
        GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
        GENERIC_NOP7 GENERIC_NOP8);
- extern unsigned char intelnops[];
- static unsigned char *intel_nops[ASM_NOP_MAX+1] = {
+ extern const unsigned char intelnops[];
+ static const unsigned char *const intel_nops[ASM_NOP_MAX+1] = {
        NULL,
        intelnops,
        intelnops + 1,
  #endif
  
  #ifdef K8_NOP1
- asm("\t.data\nk8nops: "
+ asm("\t.section .rodata, \"a\"\nk8nops: "
        K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
        K8_NOP7 K8_NOP8);
- extern unsigned char k8nops[];
- static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
+ extern const unsigned char k8nops[];
+ static const unsigned char *const k8_nops[ASM_NOP_MAX+1] = {
        NULL,
        k8nops,
        k8nops + 1,
  #endif
  
  #ifdef K7_NOP1
- asm("\t.data\nk7nops: "
+ asm("\t.section .rodata, \"a\"\nk7nops: "
        K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
        K7_NOP7 K7_NOP8);
- extern unsigned char k7nops[];
- static unsigned char *k7_nops[ASM_NOP_MAX+1] = {
+ extern const unsigned char k7nops[];
+ static const unsigned char *const k7_nops[ASM_NOP_MAX+1] = {
        NULL,
        k7nops,
        k7nops + 1,
  };
  #endif
  
+ #ifdef P6_NOP1
+ asm("\t.section .rodata, \"a\"\np6nops: "
+       P6_NOP1 P6_NOP2 P6_NOP3 P6_NOP4 P6_NOP5 P6_NOP6
+       P6_NOP7 P6_NOP8);
+ extern const unsigned char p6nops[];
+ static const unsigned char *const p6_nops[ASM_NOP_MAX+1] = {
+       NULL,
+       p6nops,
+       p6nops + 1,
+       p6nops + 1 + 2,
+       p6nops + 1 + 2 + 3,
+       p6nops + 1 + 2 + 3 + 4,
+       p6nops + 1 + 2 + 3 + 4 + 5,
+       p6nops + 1 + 2 + 3 + 4 + 5 + 6,
+       p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
+ };
+ #endif
  #ifdef CONFIG_X86_64
  
  extern char __vsyscall_0;
- static inline unsigned char** find_nop_table(void)
+ static inline const unsigned char*const * find_nop_table(void)
  {
-       return k8_nops;
+       return boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
+              boot_cpu_data.x86 < 6 ? k8_nops : p6_nops;
  }
  
  #else /* CONFIG_X86_64 */
  
- static struct nop {
+ static const struct nop {
        int cpuid;
-       unsigned char **noptable;
+       const unsigned char *const *noptable;
  } noptypes[] = {
        { X86_FEATURE_K8, k8_nops },
        { X86_FEATURE_K7, k7_nops },
+       { X86_FEATURE_P4, p6_nops },
+       { X86_FEATURE_P3, p6_nops },
        { -1, NULL }
  };
  
- static unsigned char** find_nop_table(void)
+ static const unsigned char*const * find_nop_table(void)
  {
-       unsigned char **noptable = intel_nops;
+       const unsigned char *const *noptable = intel_nops;
        int i;
  
        for (i = 0; noptypes[i].cpuid >= 0; i++) {
  /* Use this to add nops to a buffer, then text_poke the whole buffer. */
  static void add_nops(void *insns, unsigned int len)
  {
-       unsigned char **noptable = find_nop_table();
+       const unsigned char *const *noptable = find_nop_table();
  
        while (len > 0) {
                unsigned int noplen = len;
@@@ -369,8 -390,8 +390,8 @@@ void apply_paravirt(struct paravirt_pat
                BUG_ON(p->len > MAX_PATCH_LEN);
                /* prep the buffer with the original instructions */
                memcpy(insnbuf, p->instr, p->len);
 -              used = paravirt_ops.patch(p->instrtype, p->clobbers, insnbuf,
 -                                        (unsigned long)p->instr, p->len);
 +              used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
 +                                       (unsigned long)p->instr, p->len);
  
                BUG_ON(used > p->len);
  
@@@ -415,9 -436,6 +436,6 @@@ void __init alternative_instructions(vo
                        alternatives_smp_unlock(__smp_locks, __smp_locks_end,
                                                _text, _etext);
                }
-               free_init_pages("SMP alternatives",
-                               (unsigned long)__smp_locks,
-                               (unsigned long)__smp_locks_end);
        } else {
                alternatives_smp_module_add(NULL, "core kernel",
                                            __smp_locks, __smp_locks_end,
        apply_paravirt(__parainstructions, __parainstructions_end);
        local_irq_restore(flags);
  
+       if (smp_alt_once)
+               free_init_pages("SMP alternatives",
+                               (unsigned long)__smp_locks,
+                               (unsigned long)__smp_locks_end);
        restart_nmi();
  #ifdef CONFIG_X86_MCE
        restart_mce();
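
The alternative.c changes above do three things: move the vendor NOP strings from .data into .rodata, constify the lookup tables end to end, and add a P6 NOP table (on 64-bit, Intel family 6+ gets p6_nops; on 32-bit, the new P3/P4 feature entries select it). A standalone sketch of the table scheme itself, with placeholder 0x90 bytes standing in for the real GENERIC_NOP*/K8_NOP*/P6_NOP* encodings:

/*
 * Sketch of the NOP-table scheme from the diff (not kernel code).
 * Eight NOP sequences of lengths 1..8 are concatenated into one
 * read-only byte string; sequence N starts at offset 0+1+...+(N-1),
 * exactly like the p6_nops table above.  All bytes are 0x90
 * placeholders here.
 */
#include <stdio.h>
#include <string.h>

#define ASM_NOP_MAX 8

static const unsigned char nops[1+2+3+4+5+6+7+8] = {
	0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
	0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
	0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
	0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
};

static const unsigned char *const nop_table[ASM_NOP_MAX + 1] = {
	NULL,
	nops,
	nops + 1,
	nops + 1 + 2,
	nops + 1 + 2 + 3,
	nops + 1 + 2 + 3 + 4,
	nops + 1 + 2 + 3 + 4 + 5,
	nops + 1 + 2 + 3 + 4 + 5 + 6,
	nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};

/* Same loop shape as add_nops() in the diff: pad a buffer with the
 * longest NOP sequences that fit. */
static void add_nops(unsigned char *insns, unsigned int len)
{
	while (len > 0) {
		unsigned int noplen = len;
		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;
		memcpy(insns, nop_table[noplen], noplen);
		insns += noplen;
		len -= noplen;
	}
}

int main(void)
{
	unsigned char buf[11];
	unsigned int i;

	add_nops(buf, sizeof(buf));	/* filled as one 8-byte + one 3-byte NOP */
	for (i = 0; i < sizeof(buf); i++)
		printf("%02x ", buf[i]);
	printf("\n");
	return 0;
}

Because each nop_table[noplen] entry points at a complete noplen-byte sequence, padding any gap takes one memcpy per ASM_NOP_MAX-sized chunk; choosing between k8_nops, k7_nops and p6_nops then reduces to returning a different base table, which is all find_nop_table() does.
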
diff --combined arch/x86/mm/init_32.c
index 33d367a3432ebfbafbe0294872a8a20a9547e456,e4e37d4f4c52112bef742206594a2111e59a13f5..c7d19471261dc7050244bd3f0f0883bdac8c3ccb
@@@ -85,13 -85,20 +85,20 @@@ static pmd_t * __init one_md_table_init
  static pte_t * __init one_page_table_init(pmd_t *pmd)
  {
        if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
-               pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+               pte_t *page_table = NULL;
+ #ifdef CONFIG_DEBUG_PAGEALLOC
+               page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
+ #endif
+               if (!page_table)
+                       page_table =
+                               (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
  
                paravirt_alloc_pt(&init_mm, __pa(page_table) >> PAGE_SHIFT);
                set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
                BUG_ON(page_table != pte_offset_kernel(pmd, 0));
        }
-       
        return pte_offset_kernel(pmd, 0);
  }
  
@@@ -741,12 -748,24 +748,12 @@@ struct kmem_cache *pmd_cache
  
  void __init pgtable_cache_init(void)
  {
 -      size_t pgd_size = PTRS_PER_PGD*sizeof(pgd_t);
 -
 -      if (PTRS_PER_PMD > 1) {
 +      if (PTRS_PER_PMD > 1)
                pmd_cache = kmem_cache_create("pmd",
 -                                      PTRS_PER_PMD*sizeof(pmd_t),
 -                                      PTRS_PER_PMD*sizeof(pmd_t),
 -                                      SLAB_PANIC,
 -                                      pmd_ctor);
 -              if (!SHARED_KERNEL_PMD) {
 -                      /* If we're in PAE mode and have a non-shared
 -                         kernel pmd, then the pgd size must be a
 -                         page size.  This is because the pgd_list
 -                         links through the page structure, so there
 -                         can only be one pgd per page for this to
 -                         work. */
 -                      pgd_size = PAGE_SIZE;
 -              }
 -      }
 +                                            PTRS_PER_PMD*sizeof(pmd_t),
 +                                            PTRS_PER_PMD*sizeof(pmd_t),
 +                                            SLAB_PANIC,
 +                                            pmd_ctor);
  }
  
  /*
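The one_page_table_init() hunk above replaces an unconditional alloc_bootmem_low_pages() call with a try-then-fall-back sequence on CONFIG_DEBUG_PAGEALLOC builds. A minimal sketch of that fallback pattern, with hypothetical try_alloc_pages() and alloc_low_pages() standing in for alloc_bootmem_pages() and alloc_bootmem_low_pages():

/*
 * Sketch of the allocation-fallback pattern from the diff (not
 * kernel code): try the preferred allocator first, fall back to
 * the guaranteed one if it comes up empty.
 */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096

/* Stand-in for alloc_bootmem_pages(): may fail and return NULL. */
static void *try_alloc_pages(size_t size)
{
	(void)size;
	return NULL;		/* pretend the preferred pool is exhausted */
}

/* Stand-in for alloc_bootmem_low_pages(): assumed to always succeed. */
static void *alloc_low_pages(size_t size)
{
	return calloc(1, size);
}

static void *alloc_page_table(int debug_pagealloc)
{
	void *pt = NULL;

	if (debug_pagealloc)	/* a compile-time #ifdef in the real code */
		pt = try_alloc_pages(PAGE_SIZE);
	if (!pt)
		pt = alloc_low_pages(PAGE_SIZE);
	return pt;
}

int main(void)
{
	void *pt = alloc_page_table(1);
	printf("page table at %p\n", pt);
	free(pt);
	return 0;
}

The real code makes the first attempt under #ifdef CONFIG_DEBUG_PAGEALLOC; the sketch models that as a runtime flag only so it stays a single runnable translation unit.
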
diff --combined arch/x86/xen/smp.c
index d53bf9d8a72d083ed0274860c042be3ec6ff3c2e,6c058585459cd8fa9f1e7fec655601a38aae759a..c1b131bcdcbe71f2adc14833339b9096d37e9964
@@@ -356,6 -356,7 +356,7 @@@ static irqreturn_t xen_call_function_in
         */
        irq_enter();
        (*func)(info);
+       __get_cpu_var(irq_stat).irq_call_count++;
        irq_exit();
  
        if (wait) {
@@@ -370,8 -371,7 +371,8 @@@ int xen_smp_call_function_mask(cpumask_
                               void *info, int wait)
  {
        struct call_data_struct data;
 -      int cpus;
 +      int cpus, cpu;
 +      bool yield;
  
        /* Holding any lock stops cpus from going down. */
        spin_lock(&call_lock);
        /* Send a message to other CPUs and wait for them to respond */
        xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
  
 -      /* Make sure other vcpus get a chance to run.
 -         XXX too severe?  Maybe we should check the other CPU's states? */
 -      HYPERVISOR_sched_op(SCHEDOP_yield, 0);
 +      /* Make sure other vcpus get a chance to run if they need to. */
 +      yield = false;
 +      for_each_cpu_mask(cpu, mask)
 +              if (xen_vcpu_stolen(cpu))
 +                      yield = true;
 +
 +      if (yield)
 +              HYPERVISOR_sched_op(SCHEDOP_yield, 0);
  
        /* Wait for response */
        while (atomic_read(&data.started) != cpus ||
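
The xen/smp.c hunk above replaces the unconditional SCHEDOP_yield after a cross-call IPI (flagged "too severe" by the removed XXX comment) with a yield that fires only when some target vcpu has actually been preempted by the hypervisor. A sketch of that check, with hypothetical vcpu_stolen() and hypervisor_yield() standing in for xen_vcpu_stolen() and HYPERVISOR_sched_op(SCHEDOP_yield, 0):

/*
 * Sketch of the conditional-yield logic from the diff (not kernel
 * code).  The cpumask is modelled as a plain bool array and the
 * Xen calls as stub functions.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

/* Stand-in for xen_vcpu_stolen(): true if the vcpu lost its pcpu. */
static bool vcpu_stolen(int cpu)
{
	return cpu == 2;	/* pretend vcpu 2 was preempted */
}

/* Stand-in for HYPERVISOR_sched_op(SCHEDOP_yield, 0). */
static void hypervisor_yield(void)
{
	puts("yielding to the hypervisor");
}

/* Mirrors the for_each_cpu_mask() loop added in the diff. */
static void maybe_yield(const bool mask[NR_CPUS])
{
	bool yield = false;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (mask[cpu] && vcpu_stolen(cpu))
			yield = true;

	if (yield)
		hypervisor_yield();
}

int main(void)
{
	bool mask[NR_CPUS] = { false, true, true, false };

	maybe_yield(mask);	/* cpu 2 is in the mask and stolen: yields */
	return 0;
}

Yielding only when a target vcpu is stolen still lets preempted vcpus run and answer the IPI, but no longer gives up the CPU when everyone in the mask is already running.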