locking/pvqspinlock, x86: Implement the paravirt qspinlock call patching
author    Peter Zijlstra (Intel) <peterz@infradead.org>
          Fri, 24 Apr 2015 18:56:38 +0000 (14:56 -0400)
committer Ingo Molnar <mingo@kernel.org>
          Fri, 8 May 2015 10:37:09 +0000 (12:37 +0200)

We use the regular paravirt call patching to switch between:

  native_queued_spin_lock_slowpath()    __pv_queued_spin_lock_slowpath()
  native_queued_spin_unlock()           __pv_queued_spin_unlock()
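
For illustration, a minimal user-space C sketch of that dispatch idea
(an assumption/illustration only -- the names below are stand-ins, not
the kernel's pv_lock_ops API; the real slots are added to struct
pv_lock_ops in the hunks further down):

  struct qspinlock { unsigned int val; };

  static void native_slowpath(struct qspinlock *lock, unsigned int val)
  {
          (void)lock; (void)val;  /* contended-path work would go here */
  }

  static void native_unlock(struct qspinlock *lock)
  {
          lock->val = 0;          /* stand-in for the byte store */
  }

  /* Defaults point at the native code; a PV backend repoints them. */
  static struct {
          void (*slowpath)(struct qspinlock *, unsigned int);
          void (*unlock)(struct qspinlock *);
  } lock_ops = {
          .slowpath = native_slowpath,
          .unlock   = native_unlock,
  };

On top of this indirection, the call patching below rewrites the call
sites themselves once the choice is known at boot.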

We use a callee-saved call for the unlock function, which reduces the
i-cache footprint and allows 'inlining' of SPIN_UNLOCK functions
again.
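
A hedged sketch of what the callee-saved wrapper provides (the idea
behind the PV_CALLEE_SAVE_REGS_THUNK() used in the new
qspinlock_paravirt.h below -- not the literal macro expansion):

  /*
   * __raw_callee_save___pv_queued_spin_unlock:
   *         push <normally caller-clobbered registers>
   *         call __pv_queued_spin_unlock      # ordinary C function
   *         pop  <normally caller-clobbered registers>
   *         ret
   *
   * Call sites thus only see their argument clobbered, which keeps the
   * inlined unlock fast path small.
   */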

We further optimize the unlock path by patching the direct call with a
"movb $0,%arg1" if we are indeed using the native unlock code. This
makes the unlock code almost as fast as the !PARAVIRT case.
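
A sketch of what the unlock call site ends up looking like (assuming
the x86-64 calling convention with the first argument in %rdi; the
native replacement bytes are the DEF_NATIVE() template added to
paravirt_patch_64.c below):

  /*
   * PV guest:         call __raw_callee_save___pv_queued_spin_unlock
   * native (patched): movb $0, (%rdi)   # plain store of 0 to the lock byte
   */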

This significantly lowers the overhead of having
CONFIG_PARAVIRT_SPINLOCKS enabled, even for native code.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Waiman Long <Waiman.Long@hp.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Daniel J Blueman <daniel@numascale.com>
Cc: David Vrabel <david.vrabel@citrix.com>
Cc: Douglas Hatch <doug.hatch@hp.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Paolo Bonzini <paolo.bonzini@gmail.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Scott J Norton <scott.norton@hp.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: virtualization@lists.linux-foundation.org
Cc: xen-devel@lists.xenproject.org
Link: http://lkml.kernel.org/r/1429901803-29771-10-git-send-email-Waiman.Long@hp.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/Kconfig
arch/x86/include/asm/paravirt.h
arch/x86/include/asm/paravirt_types.h
arch/x86/include/asm/qspinlock.h
arch/x86/include/asm/qspinlock_paravirt.h [new file with mode: 0644]
arch/x86/kernel/paravirt-spinlocks.c
arch/x86/kernel/paravirt_patch_32.c
arch/x86/kernel/paravirt_patch_64.c

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 90b1b54f4f38c1f214287ed9a77853d5df3c1c9c..50ec043a920da54aa8bab0b6e5e9db2a6bec5208 100644
@@ -667,7 +667,7 @@ config PARAVIRT_DEBUG
 config PARAVIRT_SPINLOCKS
        bool "Paravirtualization layer for spinlocks"
        depends on PARAVIRT && SMP
-       select UNINLINE_SPIN_UNLOCK
+       select UNINLINE_SPIN_UNLOCK if !QUEUED_SPINLOCK
        ---help---
          Paravirtualized spinlocks allow a pvops backend to replace the
          spinlock implementation with something virtualization-friendly
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 8957810ad7d1e348dd6315242c3411a863745be6..266c35381b624933c6efa48f98fa9450710ea695 100644
@@ -712,6 +712,31 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
 
 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
 
+#ifdef CONFIG_QUEUED_SPINLOCK
+
+static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
+                                                       u32 val)
+{
+       PVOP_VCALL2(pv_lock_ops.queued_spin_lock_slowpath, lock, val);
+}
+
+static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
+{
+       PVOP_VCALLEE1(pv_lock_ops.queued_spin_unlock, lock);
+}
+
+static __always_inline void pv_wait(u8 *ptr, u8 val)
+{
+       PVOP_VCALL2(pv_lock_ops.wait, ptr, val);
+}
+
+static __always_inline void pv_kick(int cpu)
+{
+       PVOP_VCALL1(pv_lock_ops.kick, cpu);
+}
+
+#else /* !CONFIG_QUEUED_SPINLOCK */
+
 static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
                                                        __ticket_t ticket)
 {
@@ -724,7 +749,9 @@ static __always_inline void __ticket_unlock_kick(struct arch_spinlock *lock,
        PVOP_VCALL2(pv_lock_ops.unlock_kick, lock, ticket);
 }
 
-#endif
+#endif /* CONFIG_QUEUED_SPINLOCK */
+
+#endif /* SMP && PARAVIRT_SPINLOCKS */
 
 #ifdef CONFIG_X86_32
 #define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index f7b0b5c112f28cc89c524231e9022b4560158b48..76cd68426af8f2b26c0458324a68550ada47c2e8 100644
@@ -333,9 +333,19 @@ struct arch_spinlock;
 typedef u16 __ticket_t;
 #endif
 
+struct qspinlock;
+
 struct pv_lock_ops {
+#ifdef CONFIG_QUEUED_SPINLOCK
+       void (*queued_spin_lock_slowpath)(struct qspinlock *lock, u32 val);
+       struct paravirt_callee_save queued_spin_unlock;
+
+       void (*wait)(u8 *ptr, u8 val);
+       void (*kick)(int cpu);
+#else /* !CONFIG_QUEUED_SPINLOCK */
        struct paravirt_callee_save lock_spinning;
        void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket);
+#endif /* !CONFIG_QUEUED_SPINLOCK */
 };
 
 /* This contains all the paravirt structures: we get a convenient
diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
index f079b7020e3f54410c1d39d904d3dd4f023d330a..9d51fae1cba345e5cf01387a6c16207a6b5be872 100644
@@ -3,6 +3,7 @@
 
 #include <asm/cpufeature.h>
 #include <asm-generic/qspinlock_types.h>
+#include <asm/paravirt.h>
 
 #define        queued_spin_unlock queued_spin_unlock
 /**
@@ -11,11 +12,33 @@
  *
  * A smp_store_release() on the least-significant byte.
  */
-static inline void queued_spin_unlock(struct qspinlock *lock)
+static inline void native_queued_spin_unlock(struct qspinlock *lock)
 {
        smp_store_release((u8 *)lock, 0);
 }
 
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __pv_init_lock_hash(void);
+extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __raw_callee_save___pv_queued_spin_unlock(struct qspinlock *lock);
+
+static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
+{
+       pv_queued_spin_lock_slowpath(lock, val);
+}
+
+static inline void queued_spin_unlock(struct qspinlock *lock)
+{
+       pv_queued_spin_unlock(lock);
+}
+#else
+static inline void queued_spin_unlock(struct qspinlock *lock)
+{
+       native_queued_spin_unlock(lock);
+}
+#endif
+
 #define virt_queued_spin_lock virt_queued_spin_lock
 
 static inline bool virt_queued_spin_lock(struct qspinlock *lock)
diff --git a/arch/x86/include/asm/qspinlock_paravirt.h b/arch/x86/include/asm/qspinlock_paravirt.h
new file mode 100644
index 0000000..b002e71
--- /dev/null
+++ b/arch/x86/include/asm/qspinlock_paravirt.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_QSPINLOCK_PARAVIRT_H
+#define __ASM_QSPINLOCK_PARAVIRT_H
+
+PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock);
+
+#endif
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
index bbb6c7316341f806dc3cb3a5bdce52cbb28de2a3..a33f1eb15003f0e355551aa69c72b00aed8c1889 100644
@@ -8,11 +8,33 @@
 
 #include <asm/paravirt.h>
 
+#ifdef CONFIG_QUEUED_SPINLOCK
+__visible void __native_queued_spin_unlock(struct qspinlock *lock)
+{
+       native_queued_spin_unlock(lock);
+}
+
+PV_CALLEE_SAVE_REGS_THUNK(__native_queued_spin_unlock);
+
+bool pv_is_native_spin_unlock(void)
+{
+       return pv_lock_ops.queued_spin_unlock.func ==
+               __raw_callee_save___native_queued_spin_unlock;
+}
+#endif
+
 struct pv_lock_ops pv_lock_ops = {
 #ifdef CONFIG_SMP
+#ifdef CONFIG_QUEUED_SPINLOCK
+       .queued_spin_lock_slowpath = native_queued_spin_lock_slowpath,
+       .queued_spin_unlock = PV_CALLEE_SAVE(__native_queued_spin_unlock),
+       .wait = paravirt_nop,
+       .kick = paravirt_nop,
+#else /* !CONFIG_QUEUED_SPINLOCK */
        .lock_spinning = __PV_IS_CALLEE_SAVE(paravirt_nop),
        .unlock_kick = paravirt_nop,
-#endif
+#endif /* !CONFIG_QUEUED_SPINLOCK */
+#endif /* SMP */
 };
 EXPORT_SYMBOL(pv_lock_ops);
 
diff --git a/arch/x86/kernel/paravirt_patch_32.c b/arch/x86/kernel/paravirt_patch_32.c
index d9f32e6d6ab65476be60c34d06c522215b83a26b..e1b013696dde586ba1a80f23535ac07b1bee3e81 100644
@@ -12,6 +12,10 @@ DEF_NATIVE(pv_mmu_ops, read_cr3, "mov %cr3, %eax");
 DEF_NATIVE(pv_cpu_ops, clts, "clts");
 DEF_NATIVE(pv_cpu_ops, read_tsc, "rdtsc");
 
+#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCK)
+DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%eax)");
+#endif
+
 unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
 {
        /* arg in %eax, return in %eax */
@@ -24,6 +28,8 @@ unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
        return 0;
 }
 
+extern bool pv_is_native_spin_unlock(void);
+
 unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
                      unsigned long addr, unsigned len)
 {
@@ -47,14 +53,22 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
                PATCH_SITE(pv_mmu_ops, write_cr3);
                PATCH_SITE(pv_cpu_ops, clts);
                PATCH_SITE(pv_cpu_ops, read_tsc);
-
-       patch_site:
-               ret = paravirt_patch_insns(ibuf, len, start, end);
-               break;
+#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCK)
+               case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock):
+                       if (pv_is_native_spin_unlock()) {
+                               start = start_pv_lock_ops_queued_spin_unlock;
+                               end   = end_pv_lock_ops_queued_spin_unlock;
+                               goto patch_site;
+                       }
+#endif
 
        default:
                ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
                break;
+
+patch_site:
+               ret = paravirt_patch_insns(ibuf, len, start, end);
+               break;
        }
 #undef PATCH_SITE
        return ret;
diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
index a1da6737ba5b80c4ee636204d49d4813348ef903..e0fb41c8255bcd2f9ffbaa8566bcab630b2237fd 100644
@@ -21,6 +21,10 @@ DEF_NATIVE(pv_cpu_ops, swapgs, "swapgs");
 DEF_NATIVE(, mov32, "mov %edi, %eax");
 DEF_NATIVE(, mov64, "mov %rdi, %rax");
 
+#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCK)
+DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%rdi)");
+#endif
+
 unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
 {
        return paravirt_patch_insns(insnbuf, len,
@@ -33,6 +37,8 @@ unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
                                    start__mov64, end__mov64);
 }
 
+extern bool pv_is_native_spin_unlock(void);
+
 unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
                      unsigned long addr, unsigned len)
 {
@@ -59,14 +65,22 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
                PATCH_SITE(pv_cpu_ops, clts);
                PATCH_SITE(pv_mmu_ops, flush_tlb_single);
                PATCH_SITE(pv_cpu_ops, wbinvd);
-
-       patch_site:
-               ret = paravirt_patch_insns(ibuf, len, start, end);
-               break;
+#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCK)
+               case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock):
+                       if (pv_is_native_spin_unlock()) {
+                               start = start_pv_lock_ops_queued_spin_unlock;
+                               end   = end_pv_lock_ops_queued_spin_unlock;
+                               goto patch_site;
+                       }
+#endif
 
        default:
                ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
                break;
+
+patch_site:
+               ret = paravirt_patch_insns(ibuf, len, start, end);
+               break;
        }
 #undef PATCH_SITE
        return ret;