sched: remove the !PREEMPT_BKL code
author     Ingo Molnar <mingo@elte.hu>
           Fri, 25 Jan 2008 20:08:33 +0000 (21:08 +0100)
committer  Ingo Molnar <mingo@elte.hu>
           Fri, 25 Jan 2008 20:08:33 +0000 (21:08 +0100)
remove the !PREEMPT_BKL code.

this removes 160 lines of legacy code.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
include/linux/hardirq.h
include/linux/smp_lock.h
kernel/Kconfig.preempt
kernel/sched.c
lib/kernel_lock.c

diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 8d302298a161941ce40c6ed47411627d5f0229f8..2961ec788046627c823feb291d7d522f15982fbf 100644
 #define in_softirq()           (softirq_count())
 #define in_interrupt()         (irq_count())
 
-#if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL)
-# define in_atomic()   ((preempt_count() & ~PREEMPT_ACTIVE) != kernel_locked())
-#else
-# define in_atomic()   ((preempt_count() & ~PREEMPT_ACTIVE) != 0)
-#endif
+#define in_atomic()            ((preempt_count() & ~PREEMPT_ACTIVE) != 0)
 
 #ifdef CONFIG_PREEMPT
 # define PREEMPT_CHECK_OFFSET 1
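
Under the spinlock-flavoured BKL, holding the kernel lock raised preempt_count on CONFIG_PREEMPT kernels, so in_atomic() had to subtract kernel_locked(); with only the semaphore-based BKL left, the plain preempt_count test is enough. A hypothetical caller, purely to illustrate the resulting semantics (not part of this patch):

	/*
	 * Hypothetical illustration: in_atomic() is now a plain
	 * preempt_count check, so a task that merely holds the BKL
	 * semaphore still counts as a sleepable context.
	 */
	static void assert_sleepable(void)
	{
		WARN_ON(in_atomic() || irqs_disabled());
	}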
diff --git a/include/linux/smp_lock.h b/include/linux/smp_lock.h
index 58962c51dee108d3f9e5d12a23ce40e6e9387608..aab3a4cff4e13da65a50ec50531ccfd8ed19444d 100644
@@ -17,22 +17,10 @@ extern void __lockfunc __release_kernel_lock(void);
                __release_kernel_lock();        \
 } while (0)
 
-/*
- * Non-SMP kernels will never block on the kernel lock,
- * so we are better off returning a constant zero from
- * reacquire_kernel_lock() so that the compiler can see
- * it at compile-time.
- */
-#if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_BKL)
-# define return_value_on_smp return
-#else
-# define return_value_on_smp
-#endif
-
 static inline int reacquire_kernel_lock(struct task_struct *task)
 {
        if (unlikely(task->lock_depth >= 0))
-               return_value_on_smp __reacquire_kernel_lock();
+               return __reacquire_kernel_lock();
        return 0;
 }
 
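With the !PREEMPT_BKL special case gone, reacquire_kernel_lock() always propagates the return value of __reacquire_kernel_lock(). Its only caller is the scheduler, which retries the switch path when the BKL cannot be re-taken while a reschedule is pending. A paraphrased sketch of that call site (reconstructed from memory of kernel/sched.c in this era, not quoted from this patch; label names are assumptions):

	/* Paraphrased sketch of the scheduler-side consumer; not a
	 * verbatim quote of kernel/sched.c. */
	asmlinkage void __sched schedule(void)
	{
		struct task_struct *prev = current;

	need_resched:
		preempt_disable();
		/* ... */
		release_kernel_lock(prev);	/* drop the BKL if prev holds it */
	need_resched_nonpreemptible:
		/* ... pick the next task and context-switch ... */
		if (unlikely(reacquire_kernel_lock(current) < 0))
			goto need_resched_nonpreemptible;	/* resched pending, retry */
		preempt_enable_no_resched();
		if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
			goto need_resched;
	}
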
diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
index 4420ef427f833d03451eab38edce89a267f8bdbc..0669b70fa6a3af35c4c67599575a8c2a12154fa4 100644
@@ -52,10 +52,6 @@ config PREEMPT
 
 endchoice
 
-config PREEMPT_BKL
-       def_bool y
-       depends on SMP || PREEMPT
-
 config RCU_TRACE
        bool "Enable tracing for RCU - currently stats in debugfs"
        select DEBUG_FS
diff --git a/kernel/sched.c b/kernel/sched.c
index 22712b2e058a45f82f01f65ccb89c61117ccaa37..629614ad035838424ca7dfcf6bd67dd43bc7f8cf 100644
@@ -3955,10 +3955,9 @@ EXPORT_SYMBOL(schedule);
 asmlinkage void __sched preempt_schedule(void)
 {
        struct thread_info *ti = current_thread_info();
-#ifdef CONFIG_PREEMPT_BKL
        struct task_struct *task = current;
        int saved_lock_depth;
-#endif
+
        /*
         * If there is a non-zero preempt_count or interrupts are disabled,
         * we do not want to preempt the current task. Just return..
@@ -3974,14 +3973,10 @@ asmlinkage void __sched preempt_schedule(void)
                 * clear ->lock_depth so that schedule() doesnt
                 * auto-release the semaphore:
                 */
-#ifdef CONFIG_PREEMPT_BKL
                saved_lock_depth = task->lock_depth;
                task->lock_depth = -1;
-#endif
                schedule();
-#ifdef CONFIG_PREEMPT_BKL
                task->lock_depth = saved_lock_depth;
-#endif
                sub_preempt_count(PREEMPT_ACTIVE);
 
                /*
@@ -4002,10 +3997,9 @@ EXPORT_SYMBOL(preempt_schedule);
 asmlinkage void __sched preempt_schedule_irq(void)
 {
        struct thread_info *ti = current_thread_info();
-#ifdef CONFIG_PREEMPT_BKL
        struct task_struct *task = current;
        int saved_lock_depth;
-#endif
+
        /* Catch callers which need to be fixed */
        BUG_ON(ti->preempt_count || !irqs_disabled());
 
@@ -4017,16 +4011,12 @@ asmlinkage void __sched preempt_schedule_irq(void)
                 * clear ->lock_depth so that schedule() doesnt
                 * auto-release the semaphore:
                 */
-#ifdef CONFIG_PREEMPT_BKL
                saved_lock_depth = task->lock_depth;
                task->lock_depth = -1;
-#endif
                local_irq_enable();
                schedule();
                local_irq_disable();
-#ifdef CONFIG_PREEMPT_BKL
                task->lock_depth = saved_lock_depth;
-#endif
                sub_preempt_count(PREEMPT_ACTIVE);
 
                /*
@@ -5241,11 +5231,8 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
        spin_unlock_irqrestore(&rq->lock, flags);
 
        /* Set the preempt count _outside_ the spinlocks! */
-#if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL)
-       task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0);
-#else
        task_thread_info(idle)->preempt_count = 0;
-#endif
+
        /*
         * The idle tasks have their own, simple scheduling class:
         */
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index f73e2f8c308f2e3b17e0cd21b5f0ab7131872f08..812dbf00844bc08a4a844bda6729377b1923a5c5 100644
@@ -9,7 +9,6 @@
 #include <linux/module.h>
 #include <linux/kallsyms.h>
 
-#ifdef CONFIG_PREEMPT_BKL
 /*
  * The 'big kernel semaphore'
  *
@@ -86,128 +85,6 @@ void __lockfunc unlock_kernel(void)
                up(&kernel_sem);
 }
 
-#else
-
-/*
- * The 'big kernel lock'
- *
- * This spinlock is taken and released recursively by lock_kernel()
- * and unlock_kernel().  It is transparently dropped and reacquired
- * over schedule().  It is used to protect legacy code that hasn't
- * been migrated to a proper locking design yet.
- *
- * Don't use in new code.
- */
-static  __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);
-
-
-/*
- * Acquire/release the underlying lock from the scheduler.
- *
- * This is called with preemption disabled, and should
- * return an error value if it cannot get the lock and
- * TIF_NEED_RESCHED gets set.
- *
- * If it successfully gets the lock, it should increment
- * the preemption count like any spinlock does.
- *
- * (This works on UP too - _raw_spin_trylock will never
- * return false in that case)
- */
-int __lockfunc __reacquire_kernel_lock(void)
-{
-       while (!_raw_spin_trylock(&kernel_flag)) {
-               if (test_thread_flag(TIF_NEED_RESCHED))
-                       return -EAGAIN;
-               cpu_relax();
-       }
-       preempt_disable();
-       return 0;
-}
-
-void __lockfunc __release_kernel_lock(void)
-{
-       _raw_spin_unlock(&kernel_flag);
-       preempt_enable_no_resched();
-}
-
-/*
- * These are the BKL spinlocks - we try to be polite about preemption. 
- * If SMP is not on (ie UP preemption), this all goes away because the
- * _raw_spin_trylock() will always succeed.
- */
-#ifdef CONFIG_PREEMPT
-static inline void __lock_kernel(void)
-{
-       preempt_disable();
-       if (unlikely(!_raw_spin_trylock(&kernel_flag))) {
-               /*
-                * If preemption was disabled even before this
-                * was called, there's nothing we can be polite
-                * about - just spin.
-                */
-               if (preempt_count() > 1) {
-                       _raw_spin_lock(&kernel_flag);
-                       return;
-               }
-
-               /*
-                * Otherwise, let's wait for the kernel lock
-                * with preemption enabled..
-                */
-               do {
-                       preempt_enable();
-                       while (spin_is_locked(&kernel_flag))
-                               cpu_relax();
-                       preempt_disable();
-               } while (!_raw_spin_trylock(&kernel_flag));
-       }
-}
-
-#else
-
-/*
- * Non-preemption case - just get the spinlock
- */
-static inline void __lock_kernel(void)
-{
-       _raw_spin_lock(&kernel_flag);
-}
-#endif
-
-static inline void __unlock_kernel(void)
-{
-       /*
-        * the BKL is not covered by lockdep, so we open-code the
-        * unlocking sequence (and thus avoid the dep-chain ops):
-        */
-       _raw_spin_unlock(&kernel_flag);
-       preempt_enable();
-}
-
-/*
- * Getting the big kernel lock.
- *
- * This cannot happen asynchronously, so we only need to
- * worry about other CPU's.
- */
-void __lockfunc lock_kernel(void)
-{
-       int depth = current->lock_depth+1;
-       if (likely(!depth))
-               __lock_kernel();
-       current->lock_depth = depth;
-}
-
-void __lockfunc unlock_kernel(void)
-{
-       BUG_ON(current->lock_depth < 0);
-       if (likely(--current->lock_depth < 0))
-               __unlock_kernel();
-}
-
-#endif
-
 EXPORT_SYMBOL(lock_kernel);
 EXPORT_SYMBOL(unlock_kernel);
 
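With the spinlock variant deleted, lib/kernel_lock.c now contains only the semaphore-based implementation whose tail is visible in the hunk above. For orientation, the surviving path looks roughly like the following (a sketch reconstructed from memory of that kernel version, not a verbatim quote of the resulting file):

	static DECLARE_MUTEX(kernel_sem);	/* the 'big kernel semaphore' */

	int __lockfunc __reacquire_kernel_lock(void)
	{
		struct task_struct *task = current;
		int saved_lock_depth = task->lock_depth;

		BUG_ON(saved_lock_depth < 0);

		/*
		 * Clear ->lock_depth so that a schedule() triggered by the
		 * down() below does not try to release/reacquire the BKL
		 * recursively, then sleep with preemption enabled.
		 */
		task->lock_depth = -1;
		preempt_enable_no_resched();

		down(&kernel_sem);

		preempt_disable();
		task->lock_depth = saved_lock_depth;

		return 0;
	}

	void __lockfunc __release_kernel_lock(void)
	{
		up(&kernel_sem);
	}

	void __lockfunc lock_kernel(void)
	{
		struct task_struct *task = current;
		int depth = task->lock_depth + 1;

		if (likely(!depth))
			down(&kernel_sem);	/* outermost lock takes the semaphore */

		task->lock_depth = depth;	/* recursion tracked in ->lock_depth */
	}

	void __lockfunc unlock_kernel(void)
	{
		BUG_ON(current->lock_depth < 0);

		if (likely(--current->lock_depth < 0))
			up(&kernel_sem);	/* outermost unlock releases it */
	}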