WorkStruct: make allyesconfig
[deliverable/linux.git] drivers/cpufreq/cpufreq_conservative.c
index a152d2c46be7592d9f9ea64af4088c984b44f047..5ef5ede5b8848e96c3d7d4ba00d9e7e8a11bc60f 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -22,6 +22,7 @@
 #include <linux/types.h>
 #include <linux/fs.h>
 #include <linux/sysfs.h>
+#include <linux/cpu.h>
 #include <linux/sched.h>
 #include <linux/kmod.h>
 #include <linux/workqueue.h>
@@ -58,7 +59,7 @@ static unsigned int                           def_sampling_rate;
 #define MAX_SAMPLING_DOWN_FACTOR               (10)
 #define TRANSITION_LATENCY_LIMIT               (10 * 1000)
 
-static void do_dbs_timer(void *data);
+static void do_dbs_timer(struct work_struct *work);
 
 struct cpu_dbs_info_s {
        struct cpufreq_policy   *cur_policy;
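
Background on the prototype change above: with the 2.6.20 work_struct rework that this patch adapts to, a work handler receives the work item itself instead of an opaque void *data pointer, and normally recovers its context with container_of(). A minimal sketch under that assumption (my_dev and my_handler are illustrative names, not from this driver; dbs_work here is a file-scope static, so do_dbs_timer can simply ignore its argument):

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_dev {
	struct delayed_work work;	/* work item embedded in the owning object */
	int value;
};

static void my_handler(struct work_struct *work)
{
	/* recover the owning object from the embedded work_struct */
	struct my_dev *dev = container_of(work, struct my_dev, work.work);

	dev->value++;
}
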
@@ -72,8 +73,16 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
 
 static unsigned int dbs_enable;        /* number of CPUs using this policy */
 
+/*
+ * DEADLOCK ALERT! There is an ordering requirement between cpu_hotplug
+ * lock and dbs_mutex. cpu_hotplug lock should always be held before
+ * dbs_mutex. If any function that can potentially take cpu_hotplug lock
+ * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
+ * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
+ * is recursive for the same process. -Venki
+ */
 static DEFINE_MUTEX    (dbs_mutex);
-static DECLARE_WORK    (dbs_work, do_dbs_timer, NULL);
+static DECLARE_DELAYED_WORK(dbs_work, do_dbs_timer);
 
 struct dbs_tuners {
        unsigned int            sampling_rate;
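
The rule spelled out in the DEADLOCK ALERT comment above, sketched in isolation (example_mutex is an illustrative stand-in for dbs_mutex; the same ordering is applied to do_dbs_timer() further down in this patch):

#include <linux/cpu.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_mutex);		/* stand-in for dbs_mutex */

static void example_locked_section(void)
{
	lock_cpu_hotplug();			/* outer lock: CPU hotplug */
	mutex_lock(&example_mutex);		/* inner lock: governor state */

	/* ... work that may end up calling __cpufreq_driver_target() ... */

	mutex_unlock(&example_mutex);
	unlock_cpu_hotplug();
}
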
@@ -88,6 +97,8 @@ static struct dbs_tuners dbs_tuners_ins = {
        .up_threshold           = DEF_FREQUENCY_UP_THRESHOLD,
        .down_threshold         = DEF_FREQUENCY_DOWN_THRESHOLD,
        .sampling_down_factor   = DEF_SAMPLING_DOWN_FACTOR,
+       .ignore_nice            = 0,
+       .freq_step              = 5,
 };
 
 static inline unsigned int get_cpu_idle_time(unsigned int cpu)
@@ -174,8 +185,7 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
        ret = sscanf (buf, "%u", &input);
 
        mutex_lock(&dbs_mutex);
-       if (ret != 1 || input > 100 || input < 0 ||
-                       input <= dbs_tuners_ins.down_threshold) {
+       if (ret != 1 || input > 100 || input <= dbs_tuners_ins.down_threshold) {
                mutex_unlock(&dbs_mutex);
                return -EINVAL;
        }
@@ -194,8 +204,7 @@ static ssize_t store_down_threshold(struct cpufreq_policy *unused,
        ret = sscanf (buf, "%u", &input);
 
        mutex_lock(&dbs_mutex);
-       if (ret != 1 || input > 100 || input < 0 ||
-                       input >= dbs_tuners_ins.up_threshold) {
+       if (ret != 1 || input > 100 || input >= dbs_tuners_ins.up_threshold) {
                mutex_unlock(&dbs_mutex);
                return -EINVAL;
        }
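
Why the "input < 0" test could be dropped in both stores above: input is declared unsigned int, so the comparison is always false and only draws a compiler warning. A tiny illustrative snippet, not taken from this driver:

#include <stdio.h>

int main(void)
{
	unsigned int input = 0;

	/* always false: an unsigned value can never be below zero */
	if (input < 0)
		printf("never reached\n");

	return 0;
}
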
@@ -411,20 +420,21 @@ static void dbs_check_cpu(int cpu)
        }
 }
 
-static void do_dbs_timer(void *data)
+static void do_dbs_timer(struct work_struct *work)
 { 
        int i;
+       lock_cpu_hotplug();
        mutex_lock(&dbs_mutex);
        for_each_online_cpu(i)
                dbs_check_cpu(i);
        schedule_delayed_work(&dbs_work, 
                        usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
        mutex_unlock(&dbs_mutex);
+       unlock_cpu_hotplug();
 } 
 
 static inline void dbs_timer_init(void)
 {
-       INIT_WORK(&dbs_work, do_dbs_timer, NULL);
        schedule_delayed_work(&dbs_work,
                        usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
        return;
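
Note on the INIT_WORK() removal above: with DECLARE_DELAYED_WORK the work item is fully initialised at compile time, so dbs_timer_init() only needs to queue it, and the handler re-arms itself. A minimal sketch with illustrative names (sample_work, sample_fn; the 50000 µs period is arbitrary, not this governor's sampling_rate):

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void sample_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(sample_work, sample_fn);	/* statically initialised */

static void sample_fn(struct work_struct *work)
{
	/* ... periodic sampling would go here ... */

	/* re-arm: run again after roughly 50 ms */
	schedule_delayed_work(&sample_work, usecs_to_jiffies(50000));
}

static void sample_timer_init(void)
{
	/* first arming; later runs are queued by sample_fn() itself */
	schedule_delayed_work(&sample_work, usecs_to_jiffies(50000));
}
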
@@ -490,8 +500,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                                def_sampling_rate = MIN_STAT_SAMPLING_RATE;
 
                        dbs_tuners_ins.sampling_rate = def_sampling_rate;
-                       dbs_tuners_ins.ignore_nice = 0;
-                       dbs_tuners_ins.freq_step = 5;
 
                        dbs_timer_init();
                }